1 /*M///////////////////////////////////////////////////////////////////////////////////////
2 //
3 //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
4 //
5 //  By downloading, copying, installing or using the software you agree to this license.
6 //  If you do not agree to this license, do not download, install,
7 //  copy or use the software.
8 //
9 //
10 //                           License Agreement
11 //                For Open Source Computer Vision Library
12 //
13 // Copyright (C) 2013, OpenCV Foundation, all rights reserved.
14 // Copyright (C) 2017, Intel Corporation, all rights reserved.
15 // Third party copyrights are property of their respective owners.
16 //
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
19 //
20 //   * Redistribution's of source code must retain the above copyright notice,
21 //     this list of conditions and the following disclaimer.
22 //
23 //   * Redistribution's in binary form must reproduce the above copyright notice,
24 //     this list of conditions and the following disclaimer in the documentation
25 //     and/or other materials provided with the distribution.
26 //
27 //   * The name of the copyright holders may not be used to endorse or promote products
28 //     derived from this software without specific prior written permission.
29 //
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
40 //
41 //M*/
42 
43 #include "../precomp.hpp"
44 #include "layers_common.hpp"
45 #include "../op_cuda.hpp"
46 #include "../op_halide.hpp"
47 #include "../op_inf_engine.hpp"
48 #include "../ie_ngraph.hpp"
49 #include "../op_vkcom.hpp"
50 
51 #include <opencv2/core/utils/logger.hpp>
52 
53 #include "opencv2/core/hal/hal.hpp"
54 #include "opencv2/core/hal/intrin.hpp"
55 #include <iostream>
56 #include <numeric>
57 
58 #ifdef HAVE_OPENCL
59 #include "opencl_kernels_dnn.hpp"
60 using namespace cv::dnn::ocl4dnn;
61 #endif
62 #ifdef HAVE_TENGINE
63 #include "../tengine4dnn/include/tengine_graph_convolution.hpp"
64 #endif
65 
66 #ifdef HAVE_CUDA
67 #include "../cuda4dnn/primitives/convolution.hpp"
68 #include "../cuda4dnn/primitives/transpose_convolution.hpp"
69 using namespace cv::dnn::cuda4dnn;
70 #endif
71 
72 namespace cv
73 {
74 namespace dnn
75 {
76 
77 class BaseConvolutionLayerImpl : public ConvolutionLayer
78 {
79 public:
80     bool fusedWeights, fusedBias;
81     std::vector<double> weightsMultipliers;
82     BaseConvolutionLayerImpl(const LayerParams &params)
83     {
84         setParamsFrom(params);
85         getConvolutionKernelParams(params, kernel_size, pads_begin, pads_end, strides, dilations, padMode, adjust_pads);
86 
87         numOutput = params.get<int>("num_output");
88         int ngroups = params.get<int>("group", 1);
89         CV_Assert(numOutput % ngroups == 0);
90 
91         if (kernel_size.size() == 2) {
92             kernel = Size(kernel_size[1], kernel_size[0]);
93             stride = Size(strides[1], strides[0]);
94             for (int i = 0; i < pads_begin.size(); i++) {
95                 if (pads_begin[i] != pads_end[i])
96                     CV_Error(Error::StsNotImplemented, "Unsupported asymmetric padding in convolution layer");
97             }
98             pad = Size(pads_begin[1], pads_begin[0]);
99             dilation = Size(dilations[1], dilations[0]);
100 
101             adjustPad.height = adjust_pads[0];
102             adjustPad.width = adjust_pads[1];
103         }
104 
105         for (int i = 0; i < adjust_pads.size(); i++) {
106             CV_Assert(adjust_pads[i] < strides[i]);
107         }
108 
109         fusedWeights = false;
110         fusedBias = false;
111     }
112 
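    // finalize() runs once the actual input/output shapes are known: it validates the
    // weight blob against kernel_size, normalizes 1D parameters and recomputes the
    // paddings for SAME/VALID modes via getConvPoolPaddings().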
113     virtual void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
114     {
115         std::vector<Mat> inputs, outputs;
116         inputs_arr.getMatVector(inputs);
117         outputs_arr.getMatVector(outputs);
118 
119         CV_Assert((inputs.size() > outputs.size() && blobs.empty()) ||
120                   (!inputs.empty() && (blobs.size() == 1 || blobs.size() == 2)));
121         MatSize weightShape = blobs.empty() ? inputs[1].size : blobs[0].size;
122 
123         CV_Assert(inputs[0].dims == outputs[0].dims);
124         if (weightShape.dims() == 3)
125         {
126             kernel_size.assign(1, kernel_size[0]);
127             strides.assign(1, strides[0]);
128             dilations.assign(1, dilations[0]);
129             pads_begin.assign(1, pads_begin[0]);
130             pads_end.assign(1, pads_end[0]);
131         }
132         CV_Assert(weightShape.dims() == kernel_size.size() + 2);
133         for (int i = 0; i < kernel_size.size(); i++) {
134             CV_Assert(weightShape[i + 2] == kernel_size[i]);
135         }
136 
137         const Mat &input = inputs[0];
138         CV_Assert(((input.dims == 3 && kernel_size.size() == 1) || input.dims == 4 || input.dims == 5) && (input.type() == CV_32F || input.type() == CV_16S));
139         for (size_t i = 0; i < outputs.size(); i++)
140         {
141             CV_Assert(inputs[i].type() == input.type());
142             CV_Assert(((input.dims == 3 && kernel_size.size() == 1) || inputs[i].dims == 4 || inputs[i].dims == 5) && inputs[i].size[1] == input.size[1]);
143             for (int j = 0; j < inputs[i].dims; j++) {
144                 CV_Assert(inputs[i].size[j] == input.size[j]);
145             }
146         }
147 
148         std::vector<int> inpShape;
149         std::vector<int> outShape;
150         for (int i = 2; i < inputs[0].dims; i++) {
151             inpShape.push_back(inputs[0].size[i]);
152             outShape.push_back(outputs[0].size[i]);
153         }
154         getConvPoolPaddings(inpShape, kernel_size, strides, padMode, pads_begin, pads_end);
155         if (pads_begin.size() == 2) {
156             for (int i = 0; i < pads_begin.size(); i++) {
157                 if (pads_begin[i] != pads_end[i])
158                     CV_Error(Error::StsNotImplemented, "Unsupported asymmetric padding in convolution layer");
159             }
160             pad = Size(pads_begin[1], pads_begin[0]);
161         }
162         fusedWeights = false;
163         fusedBias = false;
164     }
165 
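    // The optional bias is stored as the second blob (blobs[1]).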
166     bool hasBias() const
167     {
168         return blobs.size() >= 2;
169     }
170 
171     virtual MatShape computeColRowShape(const MatShape &inpShape, const MatShape &outShape) const = 0;
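    // A 1x1 kernel with unit stride and dilation reduces the convolution to a plain
    // per-pixel matrix multiplication over channels.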
172     bool is1x1() const
173     {
174         return (kernel.height == 1 && kernel.width == 1) &&
175                (stride.height == 1 && stride.width == 1) &&
176                (dilation.height == 1 && dilation.width == 1);
177     }
178 
179     virtual bool tryFuse(Ptr<Layer>& top) CV_OVERRIDE
180     {
181         Ptr<BlankLayer> blank_layer = top.dynamicCast<BlankLayer>();
182         if (blank_layer)
183             return true;
184 
185         Mat w, b;
186         top->getScaleShift(w, b);
187         if (!w.empty() || !b.empty())
188         {
189             fuseWeights(w, b);
190             fusedWeights = fusedWeights || !w.empty();
191             fusedBias = fusedBias || (hasBias() && !w.empty()) || !b.empty();
192             return true;
193         }
194         return false;
195     }
196 
197     virtual void fuseWeights(const Mat& w_, const Mat& b_) = 0;
198 
199     virtual void applyHalideScheduler(Ptr<BackendNode>& node,
200                                       const std::vector<Mat*> &inputs,
201                                       const std::vector<Mat> &outputs,
202                                       int targetId) const CV_OVERRIDE
203     {
204 #ifdef HAVE_HALIDE
205         if (targetId != DNN_TARGET_CPU)
206         {
207             Layer::applyHalideScheduler(node, inputs, outputs, targetId);
208             return;
209         }
210         Halide::Var x("x"), y("y"), c("c"), n("n"), tile("tile"), yi("yi"), yo("yo"), co("co"), ci("ci");
211         Halide::Func& top = node.dynamicCast<HalideBackendNode>()->funcs[1];
212         Halide::Func& padded_input = node.dynamicCast<HalideBackendNode>()->funcs[0];
213 
214         int outW, outH, outC, outN;
215         getCanonicalSize(outputs[0].size, &outW, &outH, &outC, &outN);
216 
217         if (outW == 1 || outH <= 2)
218             return;
219 
220         if (is1x1() || outC <= 16)
221             top.reorder(x, c, y)
222                .split(y, yo, yi, 2)
223                .fuse(yo, n, tile)
224                .parallel(tile)
225                .unroll(yi)
226                .vectorize(x, outW >= 16 ? 16 : outW);
227         else
228             top.reorder(x, c, y)
229                .split(y, yo, yi, 2)
230                .split(c, co, ci, 16)
231                .fuse(yo, co, tile).fuse(n, tile, tile)
232                .parallel(tile)
233                .unroll(yi)
234                .vectorize(x, outW >= 16 ? 16 : outW);
235         padded_input.compute_at(top, yi);
236 #endif  // HAVE_HALIDE
237     }
238 };
239 
240 
241 #define IS_POWER_LAYER(layer) \
242             (!layer.empty() && !layer->type.compare("Power"))
243 //TODO: perform convolution and bias addition simultaneously for cache optimization
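// A minimal usage sketch (illustration only, not part of this file's logic): convolution
// layers are normally created through the generic LayerParams/LayerFactory API rather than
// by constructing the Impl classes directly. Parameter names follow the Caffe-style
// convention parsed by getConvolutionKernelParams(); the weight blob is assumed to be in
// OIHW layout, and `weights`/`bias` below are placeholder Mats.
//
//   LayerParams lp;
//   lp.type = "Convolution";
//   lp.set("num_output", 64);
//   lp.set("kernel_size", 3);
//   lp.set("pad", 1);
//   lp.set("stride", 1);
//   lp.blobs.push_back(weights);   // CV_32F, 64 x inpCn x 3 x 3
//   lp.blobs.push_back(bias);      // optional, 64 x 1
//   Ptr<Layer> conv = LayerFactory::createLayerInstance("Convolution", lp);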
244 class ConvolutionLayerImpl CV_FINAL : public BaseConvolutionLayerImpl
245 {
246 public:
247     enum { VEC_ALIGN = 8, DFT_TYPE = CV_32F };
248     Mat weightsMat;
249     std::vector<float> biasvec;
250     std::vector<float> reluslope;
251     Ptr<ActivationLayer> activ;
252 
253 #ifdef HAVE_OPENCL
254     Ptr<OCL4DNNConvSpatial<float> > convolutionOp;
255     std::vector<UMat> umat_blobs;
256     bool newActiv;
257     ocl4dnnFusedActiv_t activType;
258     float power;
259 #endif
260 
261 #ifdef HAVE_TENGINE
262     teng_graph_t tengine_graph;
263 #endif
264 
265 #ifdef HAVE_CUDA
266     cuda4dnn::ConvolutionConfiguration::FusionMode cudaFusionMode;
267     cuda4dnn::ConvolutionConfiguration::ActivationType cudaActType;
268     float cuda_relu_slope, cuda_crelu_floor, cuda_crelu_ceil;
269     float cuda_power_exp, cuda_power_scale, cuda_power_shift;
270 #endif
271 
272     ConvolutionLayerImpl(const LayerParams &params) : BaseConvolutionLayerImpl(params)
273     {
274 #ifdef HAVE_OPENCL
275         newActiv = false;
276         activType = OCL4DNN_CONV_FUSED_ACTIV_NONE;
277         power = 0.f;
278 #endif
279 
280 #ifdef HAVE_CUDA
281         cudaFusionMode = cuda4dnn::ConvolutionConfiguration::FusionMode::NONE;
282         cudaActType = cuda4dnn::ConvolutionConfiguration::ActivationType::IDENTITY;
283 #endif
284 #ifdef HAVE_TENGINE
285         tengine_graph=NULL;
286 #endif
287     }
288 #ifdef HAVE_TENGINE
289     ~ConvolutionLayerImpl()
290     {
291         if(NULL != tengine_graph )
292         {
293             tengine_release(tengine_graph);
294         }
295     }
296 #endif
297 
298     MatShape computeColRowShape(const MatShape &inpShape, const MatShape &outShape) const CV_OVERRIDE
299     {
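        // Shape of the intermediate im2row matrix: one row per spatial position,
        // each row holding inpGroupCn * prod(kernel_size) coefficients.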
300         CV_Assert(!blobs.empty());
301         int dims = inpShape.size();
302         int inpD = dims == 5 ? inpShape[2] : 1;
303         int inpH = inpShape[dims - 2];
304         int inpW = inpShape.back();
305         int inpGroupCn = blobs[0].size[1];
306         int ksize = inpGroupCn * std::accumulate(kernel_size.begin(), kernel_size.end(),
307                                                  1, std::multiplies<size_t>());
308         return shape(inpD * inpH * inpW, ksize);
309     }
310 
311     virtual bool supportBackend(int backendId) CV_OVERRIDE
312     {
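        // ksize is the number of spatial dimensions: 1 (1D), 2 (2D) or 3 (3D) convolution.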
313         size_t ksize = kernel_size.size();
314 #ifdef HAVE_CUDA
315         if (backendId == DNN_BACKEND_CUDA)
316         {
317             /* only 1d, 2d and 3d convolutions supported */
318             if (ksize > 0 && ksize <= 3)
319                 return true;
320 
321             return false;
322         }
323 #endif
324 #ifdef HAVE_INF_ENGINE
325         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
326         {
327             bool isArmTarget = preferableTarget == DNN_TARGET_CPU && isArmComputePlugin();
328             if (isArmTarget && blobs.empty())
329                 return false;
330             if (ksize == 1)
331                 return isArmTarget;
332             if (ksize == 3)
333                 return preferableTarget != DNN_TARGET_MYRIAD && !isArmTarget;
334             bool isMyriad = preferableTarget == DNN_TARGET_MYRIAD || preferableTarget == DNN_TARGET_HDDL;
335             if ((backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 || !isMyriad) && blobs.empty())
336                 return false;
337             return (!isMyriad || dilation.width == dilation.height);
338         }
339 #endif
340         if (backendId == DNN_BACKEND_OPENCV)
341             return ksize >= 1 && ksize <= 3;
342 #ifdef HAVE_HALIDE
343         if (backendId == DNN_BACKEND_HALIDE)
344             return ksize == 2 && !blobs.empty();
345 #endif
346 #ifdef HAVE_VULKAN
347         if (backendId == DNN_BACKEND_VKCOM)
348             return ksize == 2;
349 #endif
350         return false;
351     }
352 
353     bool getMemoryShapes(const std::vector<MatShape> &inputs,
354                          const int requiredOutputs,
355                          std::vector<MatShape> &outputs,
356                          std::vector<MatShape> &internals) const CV_OVERRIDE
357     {
358         CV_Assert(!blobs.empty() || inputs.size() > 1);
359         const int* weightShape = blobs.empty() ? &inputs[1][0] : blobs[0].size.p;
360         CV_Assert(!hasBias() || blobs[1].total() == (size_t)weightShape[0]);
361 
362         internals.clear();
363 
364         CV_Assert(inputs.size() != 0);
365         std::vector<int> inpShape(inputs[0].begin() + 2, inputs[0].end());
366 
367         int outCn = weightShape[0];
368         std::vector<int> outShape;
369         outShape.push_back(inputs[0][0]);
370         outShape.push_back(outCn);
371 
372         int inpCn = inputs[0][1];
373         if (padMode.empty())
374         {
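            // Explicit padding: out = (in + pad_begin + pad_end - dilation*(kernel-1) - 1) / stride + 1;
            // e.g. in=224, kernel=3, pads=1+1, stride=2, dilation=1 gives out=112.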
375             for (int i = 0; i < inpShape.size(); i++)
376                 outShape.push_back((inpShape[i] + pads_begin[i] + pads_end[i] - dilations[i] * (kernel_size[i] - 1) - 1) / strides[i] + 1);
377         }
378         else
379         {
380             getConvPoolOutParams(inpShape, kernel_size, strides, padMode, dilations, outShape);
381         }
382 
383         int ngroups = inpCn / weightShape[1];
384         if (ngroups == 0 || ngroups * weightShape[1] != inpCn)
385             CV_Error(Error::StsError, format("Number of input channels should "
386                      "be a multiple of %d but got %d", weightShape[1], inpCn));
387         CV_Assert(ngroups > 0 && inpCn % ngroups == 0 && outCn % ngroups == 0);
388 
389         outputs.resize(1, outShape);
390 
391         return false;
392     }
393 
394     virtual void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
395     {
396         BaseConvolutionLayerImpl::finalize(inputs_arr, outputs_arr);
397 
398         std::vector<Mat> inputs;
399         inputs_arr.getMatVector(inputs);
400         // prepare weightsMat where each row is aligned and has enough zero padding on the right to
401         // use vectorized (i.e. with intrinsics) loops without tail processing
402         if (!blobs.empty())
403         {
404             Mat wm = blobs[0].reshape(1, numOutput);
405             if( wm.step1() % VEC_ALIGN != 0 )
406             {
407                 int newcols = (int)alignSize(wm.step1(), VEC_ALIGN);
408                 Mat wm_buffer = Mat(numOutput, newcols, wm.type());
409                 Mat wm_padding = wm_buffer.colRange(wm.cols, newcols);
410                 wm_padding.setTo(Scalar::all(0.));
411                 Mat wm_aligned = wm_buffer.colRange(0, wm.cols);
412                 wm.copyTo(wm_aligned);
413                 wm = wm_aligned;
414             }
415             weightsMat = wm;
416         }
417         else
418         {
419             // initialized in .forward()
420             weightsMat.release();
421         }
422 
423         weightsMultipliers.assign(numOutput, 1.0);
424 
425         Mat biasMat = hasBias() ? blobs[1].reshape(1, numOutput) : Mat();
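        // biasvec has two extra trailing elements used as padding by vectorized loops
        // that process several output channels at once.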
426         biasvec.resize(numOutput+2);
427         if( biasMat.empty() )
428         {
429             for(int i = 0; i < numOutput; i++ )
430                 biasvec[i] = 0.f;
431         }
432         else
433         {
434             for(int i = 0; i < numOutput; i++ )
435                 biasvec[i] = biasMat.at<float>(i);
436         }
437 #ifdef HAVE_TENGINE
438         if(NULL != tengine_graph )
439         {
440             tengine_release(tengine_graph);
441             tengine_graph = NULL ;
442         }
443 #endif
444 #ifdef HAVE_OPENCL
445         convolutionOp.release();
446 #endif
447     }
448 
449     bool setActivation(const Ptr<ActivationLayer>& layer) CV_OVERRIDE
450     {
451         if ((!activ.empty() && !layer.empty()) || blobs.empty())
452             return false;
453 
454         activ = layer;
455         if (activ.empty())
456             reluslope.clear();
457 #ifdef HAVE_OPENCL
458         newActiv = true;
459         activType = OCL4DNN_CONV_FUSED_ACTIV_NONE;
460 
461         if (IS_DNN_OPENCL_TARGET(preferableTarget))
462         {
463             Ptr<PowerLayer> activ_power = activ.dynamicCast<PowerLayer>();
464             if (!activ_power.empty())
465             {
466                 if (activ_power->scale != 1.0f)  // not supported well by implementation, #17964
467                 {
468                     // FIXIT no way to check number of blobs (like, eltwise input)
469                     CV_LOG_DEBUG(NULL, "DNN/OpenCL: can't configure Power activation (scale != 1.0f)");
470                     activ.release();
471                     newActiv = false;
472                     return false;
473                 }
474                 if (activ_power->scale != 1.f || activ_power->shift != 0.f)
475                 {
476                     const int outCh = blobs[0].size[0];
477                     fuseWeights(Mat(1, outCh, CV_32F, Scalar(activ_power->scale)),
478                                 Mat(1, outCh, CV_32F, Scalar(activ_power->shift)));
479                 }
480 
481                 power = activ_power->power;
482                 activType = OCL4DNN_CONV_FUSED_ACTIV_POWER;
483             }
484             Ptr<TanHLayer> activ_tanh = activ.dynamicCast<TanHLayer>();
485             if (!activ_tanh.empty())
486             {
487                 activType = OCL4DNN_CONV_FUSED_ACTIV_TANH;
488             }
489         }
490 #endif
491 
492 #ifdef HAVE_CUDA
493         if (activ.empty())
494         {
495             /* setActivation was called with empty argument => reset all fusions */
496             cudaFusionMode = cuda4dnn::ConvolutionConfiguration::FusionMode::NONE;
497             cudaActType = cuda4dnn::ConvolutionConfiguration::ActivationType::IDENTITY;
498         }
499 
500         if(IS_DNN_CUDA_TARGET(preferableTarget))
501         {
502             CV_Assert(cudaFusionMode == ConvolutionConfiguration::FusionMode::NONE ||
503                       cudaFusionMode == ConvolutionConfiguration::FusionMode::ELTWISE_SUM);
504 
505             Ptr<ReLULayer> activ_relu = activ.dynamicCast<ReLULayer>();
506             if(!activ_relu.empty())
507             {
508                 cudaActType = cuda4dnn::ConvolutionConfiguration::ActivationType::RELU;
509                 cuda_relu_slope = activ_relu->negativeSlope;
510             }
511 
512             Ptr<ReLU6Layer> activ_relu6 = activ.dynamicCast<ReLU6Layer>();
513             if(!activ_relu6.empty())
514             {
515                 cudaActType = cuda4dnn::ConvolutionConfiguration::ActivationType::CLIPPED_RELU;
516                 cuda_crelu_floor = activ_relu6->minValue;
517                 cuda_crelu_ceil = activ_relu6->maxValue;
518             }
519 
520             Ptr<PowerLayer> activ_power = activ.dynamicCast<PowerLayer>();
521             if (!activ_power.empty())
522             {
523                 cuda_power_scale = activ_power->scale;
524                 cuda_power_shift = activ_power->shift;
525                 cuda_power_exp = activ_power->power;
526                 cudaActType = cuda4dnn::ConvolutionConfiguration::ActivationType::POWER;
527             }
528 
529             Ptr<TanHLayer> activ_tanh = activ.dynamicCast<TanHLayer>();
530             if(!activ_tanh.empty())
531                 cudaActType = cuda4dnn::ConvolutionConfiguration::ActivationType::TANH;
532 
533             Ptr<SigmoidLayer> activ_sigmoid = activ.dynamicCast<SigmoidLayer>();
534             if(!activ_sigmoid.empty())
535                 cudaActType = cuda4dnn::ConvolutionConfiguration::ActivationType::SIGMOID;
536 
537             Ptr<SwishLayer> activ_swish = activ.dynamicCast<SwishLayer>();
538             if(!activ_swish.empty())
539                 cudaActType = cuda4dnn::ConvolutionConfiguration::ActivationType::SWISH;
540 
541             Ptr<MishLayer> activ_mish = activ.dynamicCast<MishLayer>();
542             if(!activ_mish.empty())
543                 cudaActType = cuda4dnn::ConvolutionConfiguration::ActivationType::MISH;
544 
545             if (cudaActType == cuda4dnn::ConvolutionConfiguration::ActivationType::IDENTITY)
546             {
547                 /* no activation fused */
548                 activ.reset();
549             }
550             else
551             {
552                 /* activation was fused */
553                 if (cudaFusionMode == ConvolutionConfiguration::FusionMode::NONE) /* no previous fusion */
554                     cudaFusionMode = ConvolutionConfiguration::FusionMode::ACTIVATION; /* now activation */
555                 else if (cudaFusionMode == ConvolutionConfiguration::FusionMode::ELTWISE_SUM) /* previously eltwise was fused */
556                     cudaFusionMode = ConvolutionConfiguration::FusionMode::ELTWISE_SUM_THEN_ACTIVATION; /* now activation on eltwise output */
557             }
558         }
559 #endif
560         return !activ.empty();
561     }
562 
563     virtual bool tryFuse(Ptr<Layer>& top) CV_OVERRIDE
564     {
565 #ifdef HAVE_CUDA
566         if(IS_DNN_CUDA_TARGET(preferableTarget))
567         {
568             Ptr<EltwiseLayer> eltwise = top.dynamicCast<EltwiseLayer>();
569             if (!eltwise.empty()) // && eltwise->op == EltwiseLayer::SUM && eltwise->coeffs.empty())
570             {
571                 /* we also need to check that the eltwise input does not require the shortcut mechanism;
572                  * it is difficult to verify here, but we hope that `fuseLayers` has already done the check
573                  */
574                 if (cudaFusionMode == ConvolutionConfiguration::FusionMode::NONE)
575                 {
576                     /* no previous fusion */
577                     cudaFusionMode = ConvolutionConfiguration::FusionMode::ELTWISE_SUM; /* now eltwise */
578                     return true;
579                 }
580                 else if(cudaFusionMode == ConvolutionConfiguration::FusionMode::ACTIVATION)
581                 {
582                     /* previously an activation was fused */
583                     cudaFusionMode = ConvolutionConfiguration::FusionMode::ACTIVATION_THEN_ELTWISE_SUM;
584                     return true;
585                 }
586                 return false;
587             }
588         }
589 #endif
590         return BaseConvolutionLayerImpl::tryFuse(top);
591     }
592 
593     void fuseWeights(const Mat& w_, const Mat& b_) CV_OVERRIDE
594     {
595         // Convolution weights have OIHW data layout. Fusing the parameters of
596         // (conv(I) + b1) * w + b2
597         // means scaling the convolution weights by w and replacing the bias with b1*w + b2.
598         const int outCn = weightsMat.size[0];
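        // A scalar scale/shift is broadcast to one value per output channel.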
599         Mat w = w_.total() == 1 ? Mat(1, outCn, CV_32F, Scalar(w_.at<float>(0))) : w_;
600         Mat b = b_.total() == 1 ? Mat(1, outCn, CV_32F, Scalar(b_.at<float>(0))) : b_;
601         CV_Assert_N(!weightsMat.empty(), biasvec.size() == outCn + 2,
602                     w.empty() || outCn == w.total(), b.empty() || outCn == b.total());
603 
604         if (!w.empty())
605         {
606             // Keep the original weights unchanged.
607             if (weightsMat.data == blobs[0].data)
608                 weightsMat = weightsMat.clone();
609 
610             Mat originWeights = blobs[0].reshape(1, outCn);
611             for (int i = 0; i < outCn; ++i)
612             {
613                 double wi = w.at<float>(i);
614                 weightsMultipliers[i] *= wi;
615                 cv::multiply(originWeights.row(i), weightsMultipliers[i], weightsMat.row(i));
616                 biasvec[i] *= wi;
617             }
618         }
619 
620         if (!b.empty())
621         {
622             for (int i = 0; i < outCn; ++i)
623                 biasvec[i] += b.at<float>(i);
624         }
625         biasvec[outCn] = biasvec[outCn+1] = biasvec[outCn-1];
626     }
627 
628     virtual Ptr<BackendNode> initVkCom(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
629     {
630 #ifdef HAVE_VULKAN
631         int out_channel = blobs[0].size[0];
632         bool has_bias = hasBias() || fusedBias;
633         int filter_size[2] = {kernel.height, kernel.width};
634         int pad_size[2] = {pad.height, pad.width};
635         int stride_size[2] = {stride.height, stride.width};
636         int dilation_size[2] = {dilation.height, dilation.width};
637         int activation = 0;
638         vkcom::Tensor input_tensor = VkComTensor(inputs[0]);
639         int in_channel = input_tensor.dimSize(1);
640         int group = in_channel / blobs[0].size[1];
641 
642         // TODO: support group > 1
643         if (group != 1)
644             return Ptr<BackendNode>();
645 
646         int padding_mode;
647         if (padMode.empty())
648         {
649             padding_mode = vkcom::kPaddingModeCaffe;
650         }
651         else if (padMode == "VALID")
652         {
653             padding_mode = vkcom::kPaddingModeValid;
654         }
655         else if (padMode == "SAME")
656         {
657             padding_mode = vkcom::kPaddingModeSame;
658         }
659         else
660             CV_Error(Error::StsError, "Unsupported padding mode " + padMode);
661 
662         std::shared_ptr<vkcom::OpBase> op(new vkcom::OpConv(out_channel, has_bias,
663                     filter_size, pad_size,
664                     stride_size, dilation_size,
665                     activation, group,
666                     padding_mode));
667 
668         std::vector<Ptr<BackendWrapper> > blobsWrapper;
669 
670         if (fusedWeights)
671         {
672             Mat wm;
673             weightsMat.copyTo(wm); // to handle the case of isContinuous() == false
674             wm = wm.reshape(1, blobs[0].dims, blobs[0].size);
675             blobsWrapper.push_back(Ptr<BackendWrapper>(new VkComBackendWrapper(wm)));
676         }
677         else
678         {
679             blobsWrapper.push_back(Ptr<BackendWrapper>(new VkComBackendWrapper(blobs[0])));
680         }
681 
682         if (has_bias)
683         {
684             Mat biasesMat({out_channel}, CV_32F, &biasvec[0]);
685             blobsWrapper.push_back(Ptr<BackendWrapper>(new VkComBackendWrapper(biasesMat)));
686         }
687 
688         return Ptr<BackendNode>(new VkComBackendNode(inputs, op, blobsWrapper));
689 #endif  // HAVE_VULKAN
690         return Ptr<BackendNode>();
691     }
692 
693     virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
694     {
695 #ifdef HAVE_HALIDE
696         Halide::Buffer<float> inputBuffer = halideBuffer(inputs[0]);
697 
698         const int inpCn = inputBuffer.channels();
699         const int outCn = blobs[0].size[0];
700         const int inpGroupCn = blobs[0].size[1];
701         const int group = inpCn / inpGroupCn;
702         const int outGroupCn = outCn / group;
703 
704         Halide::Buffer<float> weights = wrapToHalideBuffer(blobs[0]);
705 
706         Halide::Var x("x"), y("y"), c("c"), n("n");
707         Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
708         Halide::Func padded_input(name + "_constant_exterior");
709         if (pad.width || pad.height)
710         {
711             Halide::Func bounded =
712                 Halide::BoundaryConditions::constant_exterior(inputBuffer, 0);
713             padded_input(x, y, c, n) = bounded(x, y, c, n);
714         }
715         else
716         {
717             padded_input(x, y, c, n) = inputBuffer(x, y, c, n);
718         }
719 
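        // Reduce over the kernel window (r.x, r.y) and the input channels of one group (r.z).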
720         Halide::RDom r(0, kernel.width, 0, kernel.height, 0, inpGroupCn);
721         Halide::Expr kx = x * stride.width - pad.width + r.x * dilation.width;
722         Halide::Expr ky = y * stride.height - pad.height + r.y * dilation.height;
723         Halide::Expr kc = r.z;
724         for (int i = 1; i < group; ++i)
725         {
726             kc = select(c < outGroupCn * i, kc, inpGroupCn * i + r.z);
727         }
728         Halide::Expr topExpr = sum(padded_input(kx, ky, kc, n) *
729                                    weights(r.x, r.y, r.z, c));
730         if (hasBias())
731         {
732             Halide::Buffer<float> bias = wrapToHalideBuffer(blobs[1], {outCn});
733             topExpr += bias(c);
734         }
735         top(x, y, c, n) = topExpr;
736         return Ptr<BackendNode>(new HalideBackendNode({ padded_input, top }));
737 #endif  // HAVE_HALIDE
738         return Ptr<BackendNode>();
739     }
740 
741 #ifdef HAVE_DNN_IE_NN_BUILDER_2019
742     virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
743     {
744         InferenceEngine::DataPtr input = infEngineDataNode(inputs[0]);
745         std::vector<size_t> dims = input->getDims();
746         CV_Assert(dims.size() == 4 || dims.size() == 5);
747         const int inpCn = dims[1];
748         const int outCn = blobs[0].size[0];
749         const int inpGroupCn = blobs[0].size[1];
750         const int group = inpCn / inpGroupCn;
751         InferenceEngine::Layout layout = (dims.size() == 4) ? InferenceEngine::Layout::OIHW :
752                                                               InferenceEngine::Layout::NCDHW;
753 
754         auto ieWeights = wrapToInfEngineBlob(blobs[0], layout);
755         if (fusedWeights)
756         {
757             if (weightsMat.isContinuous())
758             {
759                 Mat cvWeights = weightsMat.reshape(1, blobs[0].dims, blobs[0].size);
760                 ieWeights = wrapToInfEngineBlob(cvWeights, layout);
761             }
762             else
763             {
764                 ieWeights = InferenceEngine::make_shared_blob<float>({
765                                 InferenceEngine::Precision::FP32,
766                                 ieWeights->getTensorDesc().getDims(), layout
767                             });
768                 ieWeights->allocate();
769 
770                 Mat newWeights = infEngineBlobToMat(ieWeights).reshape(1, outCn);
771                 Mat cvWeights = weightsMat.colRange(0, newWeights.cols);
772                 cvWeights.copyTo(newWeights);
773             }
774         }
775         InferenceEngine::Blob::Ptr ieBiases;
776         if (hasBias() || fusedBias)
777         {
778             Mat biasesMat({outCn}, CV_32F, &biasvec[0]);
779             ieBiases = wrapToInfEngineBlob(biasesMat, {(size_t)outCn}, InferenceEngine::Layout::C);
780         }
781 
782         InferenceEngine::Builder::ConvolutionLayer ieLayer(name);
783 
784         ieLayer.setKernel(kernel_size);
785         ieLayer.setStrides(strides);
786         ieLayer.setDilation(dilations);
787         ieLayer.setPaddingsBegin(pads_begin);
788         ieLayer.setPaddingsEnd(pads_end);
789         ieLayer.setGroup((size_t)group);
790         ieLayer.setOutDepth((size_t)outCn);
791 
792         InferenceEngine::Builder::Layer l = ieLayer;
793         addConstantData("weights", ieWeights, l);
794         if (ieBiases)
795             addConstantData("biases", ieBiases, l);
796 
797         if (!padMode.empty())
798             l.getParameters()["auto_pad"] = padMode == "VALID" ? std::string("valid") : std::string("same_upper");
799 
800         return Ptr<BackendNode>(new InfEngineBackendNode(l));
801     }
802 #endif  // HAVE_DNN_IE_NN_BUILDER_2019
803 
804 #ifdef HAVE_DNN_NGRAPH
805     virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs,
806                                         const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
807     {
808         CV_Assert_N(inputs.size() >= 1, nodes.size() >= 1);
809         auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
810         std::vector<size_t> dims = ieInpNode->get_shape();
811         CV_Check(dims.size(), dims.size() >= 3 && dims.size() <= 5, "");
812         std::shared_ptr<ngraph::Node> ieWeights = nodes.size() > 1 ? nodes[1].dynamicCast<InfEngineNgraphNode>()->node : nullptr;
813         if (nodes.size() > 1)
814             CV_Assert(ieWeights);  // dynamic_cast should not fail
815         const int inpCn = dims[1];
816         const int inpGroupCn = nodes.size() > 1 ? ieWeights->get_shape()[1] : blobs[0].size[1];
817         const int group = inpCn / inpGroupCn;
818 
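        // nGraph's GroupConvolution expects weights shaped as [group, outCn/group, inpCn/group, k...];
        // plain Convolution omits the leading group dimension.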
819         std::vector<size_t> kernel_shape;
820         if (group != 1)
821         {
822             kernel_shape.push_back(group);
823         }
824         kernel_shape.push_back(numOutput / group);
825         kernel_shape.push_back(inpCn / group);
826         std::copy(kernel_size.begin(), kernel_size.end(), back_inserter(kernel_shape));
827 
828         if (nodes.size() == 1)
829         {
830             ieWeights = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, kernel_shape, blobs[0].data);
831             if (fusedWeights)
832             {
833                 if (weightsMat.isContinuous())
834                 {
835                     ieWeights = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, kernel_shape, weightsMat.data);
836                 }
837                 else
838                 {
839                     Mat newWeights;
840                     Mat cvWeights = weightsMat.colRange(0, blobs[0].total() / numOutput);
841                     cvWeights.copyTo(newWeights);
842                     ieWeights = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, kernel_shape, newWeights.data);
843                 }
844             }
845         }
846         else
847         {
848             auto shape = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
849                              ngraph::Shape{kernel_shape.size()}, std::vector<int64_t>(kernel_shape.begin(), kernel_shape.end()));
850             ieWeights  = std::make_shared<ngraph::op::v1::Reshape>(ieWeights, shape, true);
851         }
852 
853         ngraph::op::PadType pad_type = ngraph::op::PadType::EXPLICIT;
854         if (!padMode.empty())
855             pad_type = padMode == "VALID" ? ngraph::op::PadType::VALID : ngraph::op::PadType::SAME_UPPER;
856 
857         std::shared_ptr<ngraph::Node> conv_node;
858         if (group != 1) {
859             conv_node = std::make_shared<ngraph::op::v1::GroupConvolution>(
860                                 ieInpNode, ieWeights,
861                                 ngraph::Strides(strides),
862                                 ngraph::CoordinateDiff(std::vector<std::ptrdiff_t>(pads_begin.begin(), pads_begin.end())),
863                                 ngraph::CoordinateDiff(std::vector<std::ptrdiff_t>(pads_end.begin(),   pads_end.end())),
864                                 ngraph::Strides(dilations),
865                                 pad_type);
866         } else {
867             conv_node = std::make_shared<ngraph::op::v1::Convolution>(
868                                 ieInpNode, ieWeights,
869                                 ngraph::Strides(strides),
870                                 ngraph::CoordinateDiff(std::vector<std::ptrdiff_t>(pads_begin.begin(), pads_begin.end())),
871                                 ngraph::CoordinateDiff(std::vector<std::ptrdiff_t>(pads_end.begin(), pads_end.end())),
872                                 ngraph::Strides(dilations),
873                                 pad_type);
874         }
875 
876         if (hasBias() || fusedBias || nodes.size() == 3)
877         {
878             std::vector<size_t> shape(conv_node->get_shape().size(), 1);
879             shape[1] = conv_node->get_shape()[1];
880             std::shared_ptr<ngraph::Node> bias;
881             if (nodes.size() == 3)
882             {
883                 auto bias_shape = std::make_shared<ngraph::op::Constant>(ngraph::element::i64,
884                                     ngraph::Shape{shape.size()}, std::vector<int64_t>(shape.begin(), shape.end()));
885                 bias = std::make_shared<ngraph::op::v1::Reshape>(nodes[2].dynamicCast<InfEngineNgraphNode>()->node, bias_shape, true);
886             }
887             else
888             {
889                 bias = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape(shape), biasvec.data());
890             }
891             auto conv_bias = std::make_shared<ngraph::op::v1::Add>(conv_node, bias, ngraph::op::AutoBroadcastType::NUMPY);
892             return Ptr<BackendNode>(new InfEngineNgraphNode(conv_bias));
893         }
894         return Ptr<BackendNode>(new InfEngineNgraphNode(conv_node));
895     }
896 #endif  // HAVE_DNN_NGRAPH
897 
898     class ParallelConv : public cv::ParallelLoopBody
899     {
900     public:
901         enum { BLK_SIZE = 32, BLK_SIZE_CN = 64 };
902 
903         const Mat* input_;
904         const Mat* weights_;
905         Mat* output_;
906         int outShape[4]; // used only for conv2d
907         std::vector<size_t> kernel_size, pads_begin, pads_end, strides, dilations;
908         int ngroups_, nstripes_;
909         std::vector<int> ofstab_;
910         const std::vector<float>* biasvec_;
911         const std::vector<float>* reluslope_;
912         const ActivationLayer* activ_;
913         bool is1x1_;
914         bool useAVX;
915         bool useAVX2;
916         bool useAVX512;
917         int blk_size_cn;
918 
919         ParallelConv()
920             : input_(0), weights_(0), output_(0), ngroups_(0), nstripes_(0),
921               biasvec_(0), reluslope_(0), activ_(0), is1x1_(false), useAVX(false), useAVX2(false), useAVX512(false)
922             , blk_size_cn(0)
923         {}
924 
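        // Checks the operands, precomputes per-kernel-element input offsets (ofstab_) and
        // splits the output plane into `nstripes` stripes that operator() processes in parallel.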
925         static void run( const Mat& input, Mat& output, const Mat& weights,
926                          const std::vector<float>& biasvec,
927                          const std::vector<float>& reluslope,
928                          const std::vector<size_t>& kernel_size, const std::vector<size_t>& strides,
929                          const std::vector<size_t>& pads_begin, const std::vector<size_t>& pads_end,
930                          const std::vector<size_t>& dilations,
931                          const ActivationLayer* activ, int ngroups, int nstripes )
932         {
933             size_t karea = std::accumulate(kernel_size.begin(), kernel_size.end(),
934                                            1, std::multiplies<size_t>());
935             bool isConv1D = input.dims == 3;
936             bool isConv2D = input.dims == 4;
937             bool isConv3D = input.dims == 5;
938             CV_CheckEQ(static_cast<int>(kernel_size.size()), input.dims - 2, "");
939             CV_Assert_N(input.dims == output.dims,
940                        input.size[0] == output.size[0],
941                        weights.rows == output.size[1],
942                        weights.cols == (input.size[1]/ngroups)*karea,
943                        input.type() == output.type(),
944                        input.type() == weights.type(),
945                        input.type() == CV_32FC1,
946                        input.isContinuous(),
947                        output.isContinuous(),
948                        biasvec.size() == (size_t)output.size[1]+2);
949             CV_Check(weights.step1(), weights.step1() % VEC_ALIGN == 0, "");
950             CV_CheckType(weights.type(), CV_32FC1, "");
951             ParallelConv p;
952 
953             p.input_ = &input;
954             p.weights_ = &weights;
955             p.output_ = &output;
956             int max_ind = isConv1D? 3: 4;
957             for( int i = 0; i < max_ind; i++ ) p.outShape[i] = output.size[i];
958             p.outShape[1] /= ngroups;
959 
960             p.kernel_size = kernel_size; p.strides = strides; p.dilations = dilations;
961             p.pads_begin = pads_begin; p.pads_end = pads_end;
962 
963             p.ngroups_ = ngroups;
964             p.nstripes_ = nstripes;
965 
966             int inpCnAll = input.size[1];
967             int depth = (input.dims == 5) ? input.size[2] : 1;
968             int width = input.size[input.dims - 1];
969             int height = isConv1D? 1 : input.size[input.dims - 2];
970             int inpCn = inpCnAll / ngroups;
971 
972             p.is1x1_ = (isConv2D && kernel_size[0] == 1 && kernel_size[1] == 1 &&
973                        pads_begin[0] == 0  && pads_begin[1] == 0) ||
974                        (isConv1D && pads_begin[0] == 0 && kernel_size[0] == 1);
975 
976             p.useAVX    = checkHardwareSupport(CPU_AVX)  && isConv2D;
977             p.useAVX2   = checkHardwareSupport(CPU_AVX2) && isConv2D;
978             p.useAVX512 = CV_CPU_HAS_SUPPORT_AVX512_SKX  && isConv2D;
979 
980             int kernel_d = isConv3D? kernel_size[0] : 1;
981             int kernel_h = isConv1D? 1 : kernel_size[kernel_size.size() - 2];
982             int kernel_w = kernel_size.back();
983 
984             int blk_size_cn0 = cvCeil(800./(kernel_w*kernel_h));
985             int ncn = 16;
986             while (ncn*2 < blk_size_cn0 && ncn < inpCn)
987                 ncn *= 2;
988             ncn = std::min(ncn, inpCn);
989             p.blk_size_cn = ncn;
990 
991             int dil_d = isConv3D? dilations[0] : 1;
992             int dil_h = isConv1D? 1 : dilations[dilations.size() - 2];
993             int dil_w = dilations.back();
994 
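            // ofstab_ caches, for every (input channel, kernel element) pair, the offset of the
            // corresponding input sample relative to the top-left corner of the receptive field.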
995             p.ofstab_.resize(karea * ncn);
996             int* ofstab = &p.ofstab_[0];
997 
998             if (isConv1D)
999             {
1000                 for( int k = 0; k < ncn; k++ )
1001                     for( int k_c = 0; k_c < kernel_w; k_c++ )
1002                         ofstab[k*kernel_w + k_c] = k*width + k_c*dil_w;
1003             }
1004             else if (isConv2D)
1005             {
1006                 for( int k = 0; k < ncn; k++ )
1007                     for( int k_r = 0; k_r < kernel_h; k_r++ )
1008                         for( int k_c = 0; k_c < kernel_w; k_c++ )
1009                             ofstab[(k*kernel_h + k_r)*kernel_w + k_c] =
1010                                    (k*height + k_r*dil_h)*width + k_c*dil_w;
1011             }
1012             else
1013             {
1014                 for( int k = 0; k < ncn; k++ )
1015                     for (int k_d = 0; k_d < kernel_d; k_d++)
1016                         for( int k_r = 0; k_r < kernel_h; k_r++ )
1017                             for( int k_c = 0; k_c < kernel_w; k_c++ )
1018                                 ofstab[(k*kernel_d*kernel_h + k_d*kernel_h + k_r)*kernel_w + k_c] =
1019                                        (k*depth*height + k_d*dil_d*height + k_r*dil_h)*width + k_c*dil_w;
1020             }
1021 
1022             p.biasvec_ = &biasvec;
1023             p.reluslope_ = &reluslope;
1024             p.activ_ = p.reluslope_->empty() ? activ : 0;
1025 
1026             parallel_for_(Range(0, nstripes), p, nstripes);
1027         }
1028 
1029         virtual void operator ()(const Range &r0) const CV_OVERRIDE
1030         {
1031             const int valign = ConvolutionLayerImpl::VEC_ALIGN;
1032             int ngroups = ngroups_, batchSize = input_->size[0]*ngroups;
1033             bool isConv1D = input_->dims == 3;
1034             bool isConv2D = input_->dims == 4;
1035             bool isConv3D = input_->dims == 5;
1036 
1037             int outW = output_->size[output_->dims - 1];
1038             int outH = isConv1D? 1 : output_->size[output_->dims - 2];
1039             int outCn = output_->size[1]/ngroups;
1040 
1041             int depth = isConv3D? input_->size[2] : 1;
1042             int height = isConv1D? 1 : input_->size[input_->dims - 2];
1043             int width = input_->size[input_->dims - 1];
1044             int inpCn = input_->size[1]/ngroups;
1045 
1046             const int nstripes = nstripes_;
1047 
1048             int kernel_d = isConv3D? kernel_size[0] : 1;
1049             int kernel_h = isConv1D? 1 : kernel_size[kernel_size.size() - 2];
1050             int kernel_w = kernel_size.back();
1051             int karea = kernel_w*kernel_h*kernel_d;
1052 
1053             int pad_d = isConv3D? pads_begin[0] : 0;
1054             int pad_t = isConv1D? 0 : pads_begin[pads_begin.size() - 2];
1055             int pad_l = pads_begin.back();
1056 
1057             int stride_d = isConv3D? strides[0] : 0;
1058             int stride_h = isConv1D? 0 : strides[strides.size() - 2];
1059             int stride_w = strides.back();
1060 
1061             int dilation_d = isConv3D? dilations[0] : 1;
1062             int dilation_h = isConv1D? 1 : dilations[dilations.size() - 2];
1063             int dilation_w = dilations.back();
1064 
1065             int i, j, k, d;
1066             int inpPlaneSize = (int)input_->total(2);
1067             int outPlaneSize = (int)output_->total(2);
1068             bool is1x1 = is1x1_;
1069 
1070             int stripesPerSample;
1071             int stripeSize;
1072             Range r = r0;
1073             bool depthWiseConvolution = !is1x1 && isConv2D && ngroups > 1 && inpCn == 1 &&
1074                 outCn == 1 && kernel_d == 1 && dilation_d == 1 && stride_d == 0 && pad_d == 0 &&
1075                 width >= 16 + dilation_w*(kernel_w - 1);
1076             // for now only 3x3 depth-wise convolutions are supported
1077             depthWiseConvolution = depthWiseConvolution && kernel_w == 3 && kernel_h == 3 &&
1078                 // at most one output pixel on each side may involve padding
1079                 max(stride_w, dilation_w) >= pad_l && max(stride_h, dilation_h) >= pad_t &&
1080                 pad_l <= 1 && pad_t <= 1;
1081 
1082             if( !depthWiseConvolution && nstripes >= batchSize*2 )
1083             {
1084                 stripesPerSample = nstripes/batchSize;
1085                 stripeSize = (int)alignSize((outPlaneSize + stripesPerSample - 1)/stripesPerSample, valign);
1086                 stripeSize = std::min(stripeSize, outPlaneSize);
1087             }
1088             else
1089             {
1090                 stripesPerSample = 1;
1091                 int samplesPerStripe = std::max((batchSize + nstripes - 1)/nstripes, 1);
1092                 r.start *= samplesPerStripe;
1093                 r.end *= samplesPerStripe;
1094                 stripeSize = outPlaneSize;
1095             }
1096 
1097             const float* data_inp0_ = input_->ptr<float>();
1098             const int* ofstab = &ofstab_[0];
1099             const float* wptr_orig_ = weights_->ptr<float>();
1100             size_t wstep = weights_->step1();
1101             const float* biasptr_ = &biasvec_->at(0);
1102             const float* reluptr_ = reluslope_->empty() ? 0 : &reluslope_->at(0);
1103             float* data_out0_ = output_->ptr<float>();
1104             AutoBuffer<float> rowbuf0_;
1105             float* rowbuf0 = 0;
1106             bool use_rowbuf = !depthWiseConvolution;
1107             int blk_size = depthWiseConvolution ? outPlaneSize : min((int)BLK_SIZE, stripeSize);
1108 
1109             // im2row buffer is not used for depth-wise convolution
1110             if(use_rowbuf)
1111             {
1112                 size_t rowbufsz = alignSize(karea*blk_size_cn, valign)*min((int)BLK_SIZE, blk_size);
1113                 //printf("karea=%d, blk_size_cn=%d, rowbufsz=%d, stripeSize=%d\n", karea, blk_size_cn, (int)rowbufsz, stripeSize);
1114                 rowbuf0_.allocate(rowbufsz + valign);
1115                 rowbuf0 = alignPtr(rowbuf0_.data(), (int)(valign*sizeof(float)));
1116                 // we clear the buffer once; ultimately, it lets us avoid
1117                 // tail processing after running the unrolled/vectorized loop.
1118                 // the main idea is to make sure that the tail (a.k.a. padding) of each row
1119                 // (i.e. the elements with indices between vsz=karea*ncn and vsz_a)
1120                 // does not contain NaNs or Infs. Because the padding in the weights
1121                 // matrix is explicitly initialized with 0's, we handle all other
1122                 // cases nicely, i.e. we can skip explicit re-initialization
1123                 // of the padding - we just retain elements from the previous iteration
1124                 // of the loop over channels (cn0).
1125                 memset(rowbuf0, 0, rowbufsz*sizeof(rowbuf0[0]) );
1126             }
1127 
1128             for( int stripe = r.start; stripe < r.end; stripe++ )
1129             {
1130                 int subsampleIdx = stripe/stripesPerSample;
1131                 if( subsampleIdx >= batchSize )
1132                     break;
1133                 int stripeStart = (int)((stripe - subsampleIdx*stripesPerSample)*stripeSize);
1134                 int stripeEnd = (int)std::min(stripeStart + stripeSize, outPlaneSize);
1135                 const float* data_inp0 = data_inp0_ + subsampleIdx*inpPlaneSize*inpCn;
1136                 float* data_out0 = data_out0_ + subsampleIdx*outPlaneSize*outCn;
1137                 int startOutCn = (subsampleIdx % ngroups)*outCn;
1138                 const float* wptr_orig = wptr_orig_ + wstep*startOutCn;
1139                 const float* biasptr = biasptr_ + startOutCn;
1140 
1141                 for( int cn0 = 0; cn0 < inpCn; cn0 += blk_size_cn )
1142                 {
1143                     int cn1 = std::min(cn0 + blk_size_cn, inpCn);
1144                     int ncn = cn1 - cn0, vsz = karea*ncn;
1145                     int vsz_a = (int)alignSize(vsz, valign);
1146                     const float* wptr = wptr_orig + cn0*karea;
1147                     // we apply [Channels][P]ReLU (if any) during the final pass only.
1148                     const float* relu = cn1 == inpCn && reluptr_ ? reluptr_ + startOutCn : 0;
1149 
1150                     for( int ofs0 = stripeStart; ofs0 < stripeEnd; ofs0 += blk_size )
1151                     {
1152                         int ofs, ofs1 = std::min(ofs0 + blk_size, stripeEnd);
1153                         int bsz = ofs1 - ofs0;
1154 
1155                         int out_d = ofs0 / (outH * outW);
1156                         int out_i = (ofs0 - out_d * outH * outW) / outW;
1157                         int out_j = ofs0 % outW;
1158 
1159                         if (depthWiseConvolution)
1160                         {
1161                             CV_Assert(out_i == 0 && out_j == 0);
1162                             int in_d = out_d * stride_d - pad_d;
1163                             const float* inptr_ = data_inp0 + (cn0*depth*height + in_d*height)*width;
1164                             float* outptr_ = data_out0 + ofs0;
1165 
1166                         #if CV_TRY_AVX2
1167                             if(useAVX2)
1168                                 opt_AVX2::fastDepthwiseConv(wptr, kernel_h, kernel_w,
1169                                     stride_h, stride_w, dilation_h, dilation_w, pad_t, pad_l,
1170                                     biasptr, relu, inptr_, height, width, outptr_, out_d, outH, outW);
1171                             else
1172                         #endif
1173                         #if CV_TRY_AVX
1174                             if(useAVX)
1175                                 opt_AVX::fastDepthwiseConv(wptr, kernel_h, kernel_w,
1176                                     stride_h, stride_w, dilation_h, dilation_w, pad_t, pad_l,
1177                                     biasptr, relu, inptr_, height, width, outptr_, out_d, outH, outW);
1178                             else
1179                         #endif
1180                             {
1181                                 const float w00_ = wptr[0], w01_ = wptr[1], w02_ = wptr[2],
1182                                             w10 = wptr[3], w11 = wptr[4], w12 = wptr[5],
1183                                             w20_ = wptr[6], w21_ = wptr[7], w22_ = wptr[8];
1184                                 int outW1 = min(outW, (width - dilation_w*(kernel_w - 1) + pad_l)/stride_w);
1185                                 float relu_coeff = relu ? relu[out_d] : 1.f, bias = biasptr[out_d];
1186 
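                                // Scalar 3x3 depth-wise path: rows touching the top/bottom border zero
                                // out the corresponding kernel row; the left border is handled by the
                                // pad_l special case and the interior by the vectorized loop below.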
1187                                 for (int out_i = 0; out_i < outH; out_i++)
1188                                 {
1189                                     int in_i = out_i * stride_h - pad_t, out_j = 0;
1190                                     const float* imgptr0 = inptr_ + in_i*width;
1191                                     const float* imgptr1 = imgptr0 + dilation_h*width;
1192                                     const float* imgptr2 = imgptr0 + (dilation_h*2)*width;
1193                                     float out, w00 = w00_, w01 = w01_, w02 = w02_;
1194                                     float w20 = w20_, w21 = w21_, w22 = w22_;
1195                                     if (in_i < 0)
1196                                     {
1197                                         w00 = w01 = w02 = 0.f;
1198                                         imgptr0 = imgptr1;
1199                                     }
1200                                     else if (in_i + dilation_h*(kernel_h-1) >= height)
1201                                     {
1202                                         w20 = w21 = w22 = 0.f;
1203                                         imgptr2 = imgptr1;
1204                                     }
1205                                     float* outptr = outptr_ + out_i*outW;
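                                    // Left border: when pad_l > 0 the leftmost kernel column falls
                                    // outside the image for out_j == 0, so that tap is simply skipped.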
1206                                     if (pad_l > 0)
1207                                     {
1208                                         out = imgptr0[0]*w01 + imgptr0[dilation_w]*w02 +
1209                                               imgptr1[0]*w11 + imgptr1[dilation_w]*w12 +
1210                                               imgptr2[0]*w21 + imgptr2[dilation_w]*w22 + bias;
1211                                         if (relu)
1212                                             out = out > 0.f ? out : out*relu_coeff;
1213                                         outptr[0] = out;
1214                                         out_j = 1;
1215                                     }
1216 
1217                                 #if CV_SIMD
1218                                     // Strided depthwise convolution could in principle be vectorized
1219                                     // with AVX or AVX-512, but with 4-float vectors it is hardly worth
1220                                     // it, so only the stride_w == 1 case is vectorized below.
1221                                     if( stride_w == 1 )
1222                                     {
1223                                         const int VECSZ = v_float32::nlanes;
1224                                         const int out_delta = VECSZ/stride_w;
1225                                         v_float32 vw00 = vx_setall_f32(w00), vw01 = vx_setall_f32(w01), vw02 = vx_setall_f32(w02),
1226                                                   vw10 = vx_setall_f32(w10), vw11 = vx_setall_f32(w11), vw12 = vx_setall_f32(w12),
1227                                                   vw20 = vx_setall_f32(w20), vw21 = vx_setall_f32(w21), vw22 = vx_setall_f32(w22);
1228                                         v_float32 z = vx_setzero_f32(), vbias = vx_setall_f32(bias), vrc = vx_setall_f32(relu_coeff);
1229                                         for( ; out_j < outW1; out_j += out_delta )
1230                                         {
1231                                             if (out_j + out_delta > outW1)
1232                                             {
1233                                                 if (out_j <= pad_l)
1234                                                     break;
1235                                                 out_j = outW1 - out_delta;
1236                                             }
1237                                             int in_j = out_j * stride_w - pad_l;
1238                                             v_float32 v00 = vx_load(imgptr0 + in_j),
1239                                                       v01 = vx_load(imgptr0 + in_j + dilation_w),
1240                                                       v02 = vx_load(imgptr0 + in_j + dilation_w*2),
1241                                                       v10 = vx_load(imgptr1 + in_j),
1242                                                       v11 = vx_load(imgptr1 + in_j + dilation_w),
1243                                                       v12 = vx_load(imgptr1 + in_j + dilation_w*2),
1244                                                       v20 = vx_load(imgptr2 + in_j),
1245                                                       v21 = vx_load(imgptr2 + in_j + dilation_w),
1246                                                       v22 = vx_load(imgptr2 + in_j + dilation_w*2);
1247 
1248                                             v_float32 vout = v00*vw00 + v01*vw01 + v02*vw02 +
1249                                                              v10*vw10 + v11*vw11 + v12*vw12 +
1250                                                              v20*vw20 + v21*vw21 + v22*vw22 + vbias;
1251                                             if (relu)
1252                                                 vout = v_select(vout > z, vout, vout*vrc);
1253                                             v_store(outptr + out_j, vout);
1254                                         }
1255                                     }
1256                                 #endif
1257                                     for (; out_j < outW1; out_j++)
1258                                     {
1259                                         int in_j = out_j * stride_w - pad_l;
1260                                         out = imgptr0[in_j]*w00 + imgptr0[in_j + dilation_w]*w01 + imgptr0[in_j + dilation_w*2]*w02 +
1261                                               imgptr1[in_j]*w10 + imgptr1[in_j + dilation_w]*w11 + imgptr1[in_j + dilation_w*2]*w12 +
1262                                               imgptr2[in_j]*w20 + imgptr2[in_j + dilation_w]*w21 + imgptr2[in_j + dilation_w*2]*w22 + bias;
1263                                         if (relu)
1264                                             out = out > 0.f ? out : out*relu_coeff;
1265                                         outptr[out_j] = out;
1266                                     }
1267 
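                                    // Right border: taps that fall outside the row are masked out
                                    // via s0/s1/s2 instead of branching inside the accumulation.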
1268                                     for (; out_j < outW; out_j++ )
1269                                     {
1270                                         int in_j0 = out_j * stride_w - pad_l, in_j1 = in_j0 + dilation_w, in_j2 = in_j0 + dilation_w*2;
1271                                         float s0 = 1.f, s1 = 1.f, s2 = 1.f;
1272                                         if (in_j0 >= width)
1273                                         {
1274                                             in_j0 = 0;
1275                                             s0 = 0.f;
1276                                         }
1277                                         if (in_j1 >= width)
1278                                         {
1279                                             in_j1 = 0;
1280                                             s1 = 0.f;
1281                                         }
1282                                         if (in_j2 >= width)
1283                                         {
1284                                             in_j2 = 0;
1285                                             s2 = 0.f;
1286                                         }
1287                                         out = imgptr0[in_j0]*w00*s0 + imgptr0[in_j1]*w01*s1 + imgptr0[in_j2]*w02*s2 +
1288                                               imgptr1[in_j0]*w10*s0 + imgptr1[in_j1]*w11*s1 + imgptr1[in_j2]*w12*s2 +
1289                                               imgptr2[in_j0]*w20*s0 + imgptr2[in_j1]*w21*s1 + imgptr2[in_j2]*w22*s2 + bias;
1290                                         if (relu)
1291                                             out = out > 0.f ? out : out*relu_coeff;
1292                                         outptr[out_j] = out;
1293                                     }
1294                                 }
1295                             }
1296                             continue;
1297                         }
1298 
1299                         // do im2row for a part of input tensor
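                        // Each output location in [ofs0, ofs1) gets one row of rowbuf holding the input
                        // values under its kernel aperture across the ncn channels of the current block
                        // (vsz values, padded to vsz_a for alignment), so the convolution below reduces
                        // to dot products of the weight rows with these rows.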
1300                         float* rowbuf = rowbuf0;
1301 
1302                         if (isConv1D)
1303                         {
1304                             for( ofs = ofs0; ofs < ofs1; out_j = 0, ++out_i )
1305                             {
1306                                 int delta = std::min(ofs1 - ofs, outW - out_j);
1307                                 int out_j1 = out_j + delta;
1308 
1309                                 int in_j = out_j * stride_w - pad_l;
1310                                 const float* imgptr = data_inp0 + cn0*width + in_j;
1311                                 ofs += delta;
1312 
1313                                 // do im2row for a part of input tensor
1314                                 if( is1x1 )
1315                                 {
1316                                     for( ; out_j < out_j1; out_j++, rowbuf += vsz_a, imgptr += stride_w )
1317                                     {
1318                                         for( k = 0; k < vsz; k++ )
1319                                             rowbuf[k] = imgptr[k*inpPlaneSize];
1320                                     }
1321                                 }
1322                                 else
1323                                 {
1324                                     for( ; out_j < out_j1; out_j++, rowbuf += vsz_a, imgptr += stride_w, in_j += stride_w )
1325                                     {
1326                                         // this condition should be true for most of the tensor elements, i.e.
1327                                         // most of the time the kernel aperture is inside the tensor X-Y plane.
1328                                         if( out_j + 2 <= out_j1 && 0 <= in_j && in_j + stride_w*2 <= width - (kernel_w-1)*dilation_w )
1329                                         {
1330                                             for( k = 0; k < vsz; k++ )
1331                                             {
1332                                                 int k1 = ofstab[k];
1333                                                 float v0 = imgptr[k1];
1334                                                 float v1 = imgptr[k1 + stride_w];
1335                                                 rowbuf[k] = v0;
1336                                                 rowbuf[k+vsz_a] = v1;
1337                                             }
1338                                             out_j++;
1339                                             rowbuf += vsz_a;
1340                                             imgptr += stride_w;
1341                                             in_j += stride_w;
1342                                         }
1343                                         else
1344                                         {
1345                                             int i0 = std::max(0, (-in_j + dilation_w-1)/dilation_w);
1346                                             int i1 = std::min(kernel_w, (width - in_j + dilation_w-1)/dilation_w);
1347 
1348                                             // here some non-contiguous sub-row of the row will not be
1349                                             // filled from the tensor; we need to make sure that the
1350                                             // uncovered elements are explicitly set to 0. The easiest
1351                                             // way is to zero the whole row before the loop.
1352                                             memset(rowbuf, 0, vsz*sizeof(rowbuf[0]));
1353                                             for( k = 0; k < ncn; k++ )
1354                                             {
1355                                                 for( i = i0; i < i1; i++ )
1356                                                 {
1357                                                     int imgofs = k*width + i*dilation_w;
1358                                                     rowbuf[k*kernel_w + i] = imgptr[imgofs];
1359                                                 }
1360                                             }
1361                                         }
1362                                     }
1363                                 }
1364                             }
1365                         }
1366                         else if (isConv2D)
1367                         {
1368                             if( is1x1 && stride_w == 1 && stride_h == 1 )
1369                             {
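                                // 1x1 kernel with unit stride: each rowbuf row is just the ncn channel
                                // values at one pixel, so im2row is a channel/pixel transpose; with
                                // universal intrinsics it is done four pixels x four channels at a time.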
1370                                 const float* imgptr = data_inp0 + (cn0*height + out_i)*width + out_j;
1371                                 for( int j = 0; j < bsz; j++, rowbuf += vsz_a )
1372                                 {
1373                                     if( j + 4 <= bsz )
1374                                     {
1375                                         k = 0;
1376                                     #if CV_SIMD128
1377                                         for( ; k <= vsz - 4; k += 4 )
1378                                         {
1379                                             const float* inp = imgptr + j + k*inpPlaneSize;
1380                                             v_float32x4 p0 = v_load(inp), p1 = v_load(inp + inpPlaneSize);
1381                                             v_float32x4 p2 = v_load(inp + inpPlaneSize*2), p3 = v_load(inp + inpPlaneSize*3);
1382                                             v_float32x4 r0, r1, r2, r3;
1383                                             v_transpose4x4(p0, p1, p2, p3, r0, r1, r2, r3);
1384                                             v_store(rowbuf + k, r0);
1385                                             v_store(rowbuf + k + vsz_a, r1);
1386                                             v_store(rowbuf + k + vsz_a*2, r2);
1387                                             v_store(rowbuf + k + vsz_a*3, r3);
1388                                         }
1389                                     #endif
1390                                         for( ; k < vsz; k++ )
1391                                         {
1392                                             const float* inp = imgptr + j + k*inpPlaneSize;
1393                                             float v0 = inp[0], v1 = inp[1], v2 = inp[2], v3 = inp[3];
1394                                             rowbuf[k] = v0;
1395                                             rowbuf[k + vsz_a] = v1;
1396                                             rowbuf[k + vsz_a*2] = v2;
1397                                             rowbuf[k + vsz_a*3] = v3;
1398                                         }
1399                                         j += 3;
1400                                         rowbuf += vsz_a*3;
1401                                     }
1402                                     else
1403                                     {
1404                                         for( k = 0; k < vsz; k++ )
1405                                         {
1406                                             rowbuf[k] = imgptr[j + k*inpPlaneSize];
1407                                         }
1408                                     }
1409                                 }
1410                             }
1411                             else
1412                             for( ofs = ofs0; ofs < ofs1; out_j = 0, ++out_i )
1413                             {
1414                                 int delta = std::min(ofs1 - ofs, outW - out_j);
1415                                 int out_j1 = out_j + delta;
1416 
1417                                 int in_i = out_i * stride_h - pad_t;
1418                                 int in_j = out_j * stride_w - pad_l;
1419                                 const float* imgptr = data_inp0 + (cn0*height + in_i)*width + in_j;
1420                                 ofs += delta;
1421 
1422                                 // do im2row for a part of input tensor
1423                                 if( is1x1 )
1424                                 {
1425                                     for( ; out_j < out_j1; out_j++, rowbuf += vsz_a, imgptr += stride_w )
1426                                     {
1427                                         for( k = 0; k < vsz; k++ )
1428                                             rowbuf[k] = imgptr[k*inpPlaneSize];
1429                                     }
1430                                 }
1431                                 else
1432                                 {
1433                                     bool ok_i = 0 <= in_i && in_i < height - (kernel_h-1)*dilation_h;
1434                                     int i0 = std::max(0, (-in_i + dilation_h-1)/dilation_h);
1435                                     int i1 = std::min(kernel_h, (height - in_i + dilation_h-1)/dilation_h);
1436 
1437                                     for( ; out_j < out_j1; out_j++, rowbuf += vsz_a, imgptr += stride_w, in_j += stride_w )
1438                                     {
1439                                         // this condition should be true for most of the tensor elements, i.e.
1440                                         // most of the time the kernel aperture is inside the tensor X-Y plane.
1441                                         if( ok_i && out_j + 2 <= out_j1 && 0 <= in_j && in_j + stride_w*2 <= width - (kernel_w-1)*dilation_w )
1442                                         {
1443                                             for( k = 0; k < vsz; k++ )
1444                                             {
1445                                                 int k1 = ofstab[k];
1446                                                 float v0 = imgptr[k1];
1447                                                 float v1 = imgptr[k1 + stride_w];
1448                                                 rowbuf[k] = v0;
1449                                                 rowbuf[k+vsz_a] = v1;
1450                                             }
1451                                             out_j++;
1452                                             rowbuf += vsz_a;
1453                                             imgptr += stride_w;
1454                                             in_j += stride_w;
1455                                         }
1456                                         else
1457                                         {
1458                                             int j0 = std::max(0, (-in_j + dilation_w-1)/dilation_w);
1459                                             int j1 = std::min(kernel_w, (width - in_j + dilation_w-1)/dilation_w);
1460 
1461                                             // here some non-contiguous sub-row of the row will not be
1462                                             // filled from the tensor; we need to make sure that the
1463                                             // uncovered elements are explicitly set to 0. The easiest
1464                                             // way is to zero the whole row before the loop.
1465                                             memset(rowbuf, 0, vsz*sizeof(rowbuf[0]));
1466                                             for( k = 0; k < ncn; k++ )
1467                                             {
1468                                                 for( i = i0; i < i1; i++ )
1469                                                 {
1470                                                     for( j = j0; j < j1; j++ )
1471                                                     {
1472                                                         int imgofs = k*(width*height) + i*(dilation_h*width) + j*dilation_w;
1473                                                         rowbuf[(k*kernel_h + i)*kernel_w + j] = imgptr[imgofs];
1474                                                     }
1475                                                 }
1476                                             }
1477                                         }
1478                                     }
1479                                 }
1480                             }
1481                         }
1482                         else
1483                         {
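                            // General (3D) case: zero the row first, then copy only the in-range part
                            // of the kernel_d x kernel_h x kernel_w aperture for each channel.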
1484                             for( ofs = ofs0; ofs < ofs1; out_d += (out_i + 1) / outH, out_i = (out_i + 1) % outH, out_j = 0 )
1485                             {
1486                                 int delta = std::min(ofs1 - ofs, outW - out_j);
1487                                 int out_j1 = out_j + delta;
1488 
1489                                 int in_d = out_d * stride_d - pad_d;
1490                                 int in_i = out_i * stride_h - pad_t;
1491                                 int in_j = out_j * stride_w - pad_l;
1492                                 const float* imgptr = data_inp0 + (cn0*depth*height + in_d*height + in_i)*width + in_j;
1493                                 ofs += delta;
1494 
1495                                 int d0 = std::max(0, (-in_d + dilation_d - 1) / dilation_d);
1496                                 int d1 = std::min(kernel_d, (depth - in_d + dilation_d - 1) / dilation_d);
1497 
1498                                 int i0 = std::max(0, (-in_i + dilation_h-1)/dilation_h);
1499                                 int i1 = std::min(kernel_h, (height - in_i + dilation_h-1)/dilation_h);
1500 
1501                                 for( ; out_j < out_j1; out_j++, rowbuf += vsz_a, imgptr += stride_w, in_j += stride_w )
1502                                 {
1503                                     int j0 = std::max(0, (-in_j + dilation_w-1)/dilation_w);
1504                                     int j1 = std::min(kernel_w, (width - in_j + dilation_w-1)/dilation_w);
1505 
1506                                     // here some non-contiguous sub-row of the row will not be
1507                                     // filled from the tensor; we need to make sure that the
1508                                     // uncovered elements are explicitly set to 0. The easiest
1509                                     // way is to zero the whole row before the loop.
1510                                     memset(rowbuf, 0, vsz*sizeof(rowbuf[0]));
1511                                     for( k = 0; k < ncn; k++ )
1512                                     {
1513                                         for ( d = d0; d < d1; d++)
1514                                         {
1515                                             for( i = i0; i < i1; i++ )
1516                                             {
1517                                                 for( j = j0; j < j1; j++ )
1518                                                 {
1519                                                     int imgofs = k*(depth*width*height) + d*dilation_d*width*height + i*(dilation_h*width) + j*dilation_w;
1520                                                     rowbuf[(k*kernel_d*kernel_h + d*kernel_h + i)*kernel_w + j] = imgptr[imgofs];
1521                                                 }
1522                                             }
1523                                         }
1524                                     }
1525                                 }
1526                             }
1527                         }
1528 
1529                         // now compute dot product of the weights
1530                         // and im2row-transformed part of the tensor
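                        // Two output channels are processed per iteration of the loop below: four
                        // im2row rows are multiplied by each weight row with FMA, the partial sums are
                        // folded with v_reduce_sum4, the bias is used only on the first input-channel
                        // slice (cn0 == 0) and the running output is accumulated otherwise; an optional
                        // leaky-ReLU slope (relu[]) is fused into the result.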
1531                     #if CV_TRY_AVX512_SKX
1532                         /* AVX512 convolution requires an alignment of 16, and ROI is only there for larger vector sizes */
1533                         if(useAVX512)
1534                             opt_AVX512_SKX::fastConv(wptr, wstep, biasptr, rowbuf0, data_out0 + ofs0,
1535                                           outShape, bsz, vsz, vsz_a, relu, cn0 == 0);
1536                         else
1537                     #endif
1538                     #if CV_TRY_AVX2
1539                         if(useAVX2)
1540                             opt_AVX2::fastConv(wptr, wstep, biasptr, rowbuf0, data_out0 + ofs0,
1541                                           outShape, bsz, vsz, vsz_a, relu, cn0 == 0);
1542                         else
1543                     #endif
1544                     #if CV_TRY_AVX
1545                         if(useAVX)
1546                             opt_AVX::fastConv(wptr, wstep, biasptr, rowbuf0, data_out0 + ofs0,
1547                                          outShape, bsz, vsz, vsz_a, relu, cn0 == 0);
1548                         else
1549                     #endif
1550                         for( int i = 0; i < outCn; i += 2 )
1551                         {
1552                             const float* wptr0 = wptr + i*wstep;
1553                             const float* wptr1 = wptr0 + wstep;
1554                             float* outptr0 = data_out0 + ofs0 + i*outPlaneSize;
1555                             float* outptr1 = outptr0 + outPlaneSize;
1556                             float bias0 = biasptr[i], bias1 = biasptr[i+1];
1557                             float r0 = 1.f, r1 = 1.f;
1558 
1559                             if( i+1 >= outCn )
1560                             {
1561                                 wptr1 = wptr0;
1562                                 outptr1 = outptr0;
1563                                 bias1 = bias0;
1564                             }
1565 
1566                             if( relu )
1567                             {
1568                                 r0 = relu[i]; r1 = relu[i+1];
1569                                 if( i+1 >= outCn )
1570                                     r1 = r0;
1571                             }
1572 
1573                             int j = 0;
1574                         #if CV_SIMD128
1575                             v_float32x4 vr0 = v_setall_f32(r0), vr1 = v_setall_f32(r1), z = v_setzero_f32();
1576 
1577                             for( ; j <= bsz - 4; j += 4 )
1578                             {
1579                                 const float* rptr = rowbuf0 + j*vsz_a;
1580                                 v_float32x4 s0, s1;
1581 
1582                                 if( cn0 == 0 )
1583                                 {
1584                                     s0 = v_setall_f32(bias0);
1585                                     s1 = v_setall_f32(bias1);
1586                                 }
1587                                 else
1588                                 {
1589                                     s0 = v_load(outptr0 + j);
1590                                     s1 = v_load(outptr1 + j);
1591                                 }
1592 
1593                                 v_float32x4 vs00 = v_setzero_f32(), vs01 = v_setzero_f32(),
1594                                             vs02 = v_setzero_f32(), vs03 = v_setzero_f32(),
1595                                             vs10 = v_setzero_f32(), vs11 = v_setzero_f32(),
1596                                             vs12 = v_setzero_f32(), vs13 = v_setzero_f32();
1597                                 for( k = 0; k < vsz; k += 4, rptr += 4 )
1598                                 {
1599                                     v_float32x4 w0 = v_load_aligned(wptr0 + k);
1600                                     v_float32x4 w1 = v_load_aligned(wptr1 + k);
1601                                     v_float32x4 r0 = v_load_aligned(rptr);
1602                                     v_float32x4 r1 = v_load_aligned(rptr + vsz_a);
1603                                     v_float32x4 r2 = v_load_aligned(rptr + vsz_a*2);
1604                                     v_float32x4 r3 = v_load_aligned(rptr + vsz_a*3);
1605 
1606                                     vs00 = v_fma(w0, r0, vs00);
1607                                     vs01 = v_fma(w0, r1, vs01);
1608                                     vs02 = v_fma(w0, r2, vs02);
1609                                     vs03 = v_fma(w0, r3, vs03);
1610 
1611                                     vs10 = v_fma(w1, r0, vs10);
1612                                     vs11 = v_fma(w1, r1, vs11);
1613                                     vs12 = v_fma(w1, r2, vs12);
1614                                     vs13 = v_fma(w1, r3, vs13);
1615                                 }
1616                                 s0 += v_reduce_sum4(vs00, vs01, vs02, vs03);
1617                                 s1 += v_reduce_sum4(vs10, vs11, vs12, vs13);
1618                                 if( relu )
1619                                 {
1620                                     s0 = v_select(s0 > z, s0, s0*vr0);
1621                                     s1 = v_select(s1 > z, s1, s1*vr1);
1622                                 }
1623 
1624                                 v_store(outptr0 + j, s0);
1625                                 v_store(outptr1 + j, s1);
1626                             }
1627                         #endif
1628                             for( ; j < bsz; j++ )
1629                             {
1630                                 const float* rptr = rowbuf0 + j*vsz_a;
1631                                 float s00, s10;
1632 
1633                                 if( cn0 == 0 )
1634                                 {
1635                                     s00 = bias0;
1636                                     s10 = bias1;
1637                                 }
1638                                 else
1639                                 {
1640                                     s00 = outptr0[j];
1641                                     s10 = outptr1[j];
1642                                 }
1643 
1644                                 for( k = 0; k < vsz; k++ )
1645                                 {
1646                                     float r0 = rptr[k];
1647                                     s00 += wptr0[k]*r0;
1648                                     s10 += wptr1[k]*r0;
1649                                 }
1650                                 if( relu )
1651                                 {
1652                                     s00 = s00 > 0.f ? s00 : s00*r0;
1653                                     s10 = s10 > 0.f ? s10 : s10*r1;
1654                                 }
1655 
1656                                 outptr0[j] = s00;
1657                                 outptr1[j] = s10;
1658                             }
1659                         }
1660                     }
1661                 }
1662 
1663                 if( activ_ )
1664                     activ_->forwardSlice(data_out0 + stripeStart, data_out0 + stripeStart,
1665                                          (int)(stripeEnd - stripeStart),
1666                                          outPlaneSize, startOutCn, startOutCn + outCn);
1667             }
1668         }
1669     };
1670 
1671 #ifdef HAVE_OPENCL
1672     bool forward_ocl(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
1673     {
1674         if (kernel_size.size() != 2)
1675         {
1676             // no OpenCL optimizations, see supportBackend()
1677             return false;
1678         }
1679 
1680         std::vector<UMat> inputs;
1681         std::vector<UMat> outputs;
1682 
1683         bool use_half = (inps.depth() == CV_16S);
1684         inps.getUMatVector(inputs);
1685         outs.getUMatVector(outputs);
1686 
1687         CV_Assert(outputs.size() == 1);
1688         for (int i = 0; i < inputs.size(); ++i)
1689             CV_Assert(inputs[i].u != outputs[0].u);
1690 
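        // Weights (and bias) may come in as extra network inputs instead of stored blobs;
        // in that case copy them into umat_blobs and keep only the data tensor in inputs.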
1691         if (blobs.empty())
1692         {
1693             size_t n = inputs.size() - 1;
1694             umat_blobs.resize(n);
1695             for (size_t i = 0; i < n; i++)
1696             {
1697                 inputs[i + 1].copyTo(umat_blobs[i]);
1698             }
1699             inputs.resize(1);
1700         }
1701 
1702         if (umat_blobs.empty())
1703         {
1704             size_t n = blobs.size();
1705             umat_blobs.resize(n);
1706             for (size_t i = 0; i < n; i++)
1707             {
1708                 if (use_half)
1709                     convertFp16(blobs[i], umat_blobs[i]);
1710                 else
1711                     blobs[i].copyTo(umat_blobs[i]);
1712             }
1713         }
1714 
1715         if (convolutionOp.empty() || blobs.empty())
1716         {
1717             OCL4DNNConvConfig config;
1718             config.in_shape = shape(inputs[0]);
1719             config.out_shape = shape(outputs[0]);
1720             config.kernel = kernel;
1721             config.pad = pad;
1722             config.stride = stride;
1723             config.dilation = dilation;
1724             config.group = inputs[0].size[1] / umat_blobs[0].size[1];
1725             config.bias_term = umat_blobs.size() == 2;
1726             config.use_half = use_half;
1727 
1728             convolutionOp = Ptr<OCL4DNNConvSpatial<float> >(new OCL4DNNConvSpatial<float>(config));
1729         }
1730 
1731         int outCn = umat_blobs[0].size[0];
1732 
1733         reluslope.clear();
1734         if( activ )
1735         {
1736             Ptr<ReLULayer> activ_relu = activ.dynamicCast<ReLULayer>();
1737             if( !activ_relu.empty() )
1738             {
1739                 reluslope.assign(outCn+2, activ_relu->negativeSlope);
1740                 activType = OCL4DNN_CONV_FUSED_ACTIV_RELU;
1741             }
1742 
1743             Ptr<ReLU6Layer> activ_relu6 = activ.dynamicCast<ReLU6Layer>();
1744             if( !activ_relu6.empty() )
1745             {
1746                 reluslope.resize(2);
1747                 reluslope[0] = activ_relu6->minValue;
1748                 reluslope[1] = activ_relu6->maxValue;
1749                 activType = OCL4DNN_CONV_FUSED_ACTIV_RELU6;
1750             }
1751 
1752             Ptr<ChannelsPReLULayer> activ_chprelu = activ.dynamicCast<ChannelsPReLULayer>();
1753             if( !activ_chprelu.empty() )
1754             {
1755                 const Mat& m = activ_chprelu->blobs[0];
1756                 CV_Assert(m.isContinuous() && m.type() == CV_32F && (int)m.total() == outCn);
1757                 const float* mdata = m.ptr<float>();
1758                 reluslope.resize(outCn+2);
1759                 std::copy(mdata, mdata + outCn, reluslope.begin());
1760                 reluslope[outCn] = reluslope[outCn+1] = reluslope[outCn-1];
1761                 activType = OCL4DNN_CONV_FUSED_ACTIV_PRELU;
1762             }
1763         }
1764 
1765         if (fusedWeights)
1766         {
1767             if (use_half)
1768                 convertFp16(weightsMat, umat_blobs[0]);
1769             else
1770                 weightsMat.copyTo(umat_blobs[0]);
1771             fusedWeights = false;
1772         }
1773         if (fusedBias)
1774         {
1775             if ( umat_blobs.size() < 2 )
1776                 umat_blobs.resize(2);
1777             if (use_half)
1778                 convertFp16(Mat(biasvec, true), umat_blobs[1]);
1779             else
1780                 Mat(biasvec, true).copyTo(umat_blobs[1]);
1781             convolutionOp->setBias(true);
1782             fusedBias = false;
1783         }
1784 
1785         if ( newActiv )
1786         {
1787             if ( activType == OCL4DNN_CONV_FUSED_ACTIV_RELU )
1788             {
1789                 CV_Assert(!reluslope.empty());
1790                 convolutionOp->setActivReLU(true, reluslope[0]);
1791             }
1792             else if ( activType == OCL4DNN_CONV_FUSED_ACTIV_PRELU)
1793             {
1794                 CV_Assert(!reluslope.empty());
1795                 convolutionOp->setActivPReLU(true, reluslope);
1796             }
1797             else if ( activType == OCL4DNN_CONV_FUSED_ACTIV_POWER)
1798             {
1799                 convolutionOp->setActivPower(true, power);
1800             }
1801             else if ( activType == OCL4DNN_CONV_FUSED_ACTIV_TANH)
1802             {
1803                 convolutionOp->setActivTanh(true);
1804             }
1805             else if ( activType == OCL4DNN_CONV_FUSED_ACTIV_RELU6)
1806             {
1807                 convolutionOp->setActivReLU6(true, reluslope[0], reluslope[1]);
1808             }
1809             else
1810             {
1811                 convolutionOp->setActivReLU(false, 0);
1812                 convolutionOp->setActivPReLU(false, reluslope);
1813                 convolutionOp->setActivPower(false, 1.f);
1814                 convolutionOp->setActivTanh(false);
1815                 convolutionOp->setActivReLU6(false, 0, 0);
1816             }
1817             newActiv = false;
1818         }
1819 
1820         UMat& inpMat = inputs[0];
1821         UMat& outMat = outputs[0];
1822         int batch_size = inpMat.size[0];
1823 
1824         return convolutionOp->Forward(inpMat,
1825                                       inputs.size() == 2 ? inputs[1] : UMat(),
1826                                       umat_blobs[0],
1827                                       umat_blobs.size() > 1 ? umat_blobs[1] : UMat(),
1828                                       outMat,
1829                                       batch_size);
1830     }
1831 #endif
1832 
1833     void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
1834     {
1835         CV_TRACE_FUNCTION();
1836         CV_TRACE_ARG_VALUE(name, "name", name.c_str());
1837 
1838 #if CV_SSE3
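        // Temporarily enable flush-to-zero / denormals-are-zero to avoid denormal slowdowns
        // in the FP32 kernels; the previous modes are restored at the end of forward().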
1839         uint32_t ftzMode = _MM_GET_FLUSH_ZERO_MODE();
1840         uint32_t dazMode = _MM_GET_DENORMALS_ZERO_MODE();
1841         _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
1842         _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
1843 #endif
1844 
1845         CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
1846                    forward_ocl(inputs_arr, outputs_arr, internals_arr))
1847 
1848         if (inputs_arr.depth() == CV_16S)
1849         {
1850             forward_fallback(inputs_arr, outputs_arr, internals_arr);
1851             return;
1852         }
1853 
1854         std::vector<Mat> inputs, outputs;
1855         inputs_arr.getMatVector(inputs);
1856         outputs_arr.getMatVector(outputs);
1857 
1858         int outCn = blobs.empty() ? inputs[1].size[0] : blobs[0].size[0];
1859         // Need to align non-const blobs
1860         if (blobs.empty())
1861         {
1862             Mat wm = inputs[1].reshape(1, outCn);
1863             if (wm.data != weightsMat.data)
1864             {
1865                 int newcols = (int)alignSize(wm.step1(), VEC_ALIGN);
1866                 Mat wm_buffer = Mat(numOutput, newcols, wm.type());
1867                 Mat wm_padding = wm_buffer.colRange(wm.cols, newcols);
1868                 wm_padding.setTo(Scalar::all(0.));
1869                 weightsMat = wm_buffer.colRange(0, wm.cols);
1870 
1871                 wm.copyTo((const Mat&)weightsMat);
1872                 if (inputs.size() > 2)
1873                 {
1874                     Mat biasMat = inputs[2].reshape(1, outCn);
1875                     biasMat.col(0).copyTo(biasvec);
1876                 }
1877                 biasvec.resize(outCn + 2, 0);
1878             }
1879         }
1880         /*if (inputs[0].dims > 3) {
1881             printf("conv %s: input (%d x %d x %d x %d), kernel (%d x %d), pad (%d x %d), stride (%d x %d), dilation (%d x %d)\n",
1882                    name.c_str(), inputs[0].size[0], inputs[0].size[1], inputs[0].size[2], inputs[0].size[3],
1883                    kernel.width, kernel.height, pad.width, pad.height,
1884                    stride.width, stride.height, dilation.width, dilation.height);
1885         }
1886         else {
1887             printf("conv %s: input (%d x %d x %d), kernel (%d x %d), pad (%d x %d), stride (%d x %d), dilation (%d x %d)\n",
1888                    name.c_str(), inputs[0].size[0], inputs[0].size[1], inputs[0].size[2],
1889                    kernel.width, kernel.height, pad.width, pad.height,
1890                    stride.width, stride.height, dilation.width, dilation.height);
1891         }*/
1892         int inpGroupCn = blobs.empty() ? inputs[1].size[1] : blobs[0].size[1];
1893         CV_Assert_N(inputs.size() >= (size_t)1, inputs[0].size[1] % inpGroupCn == 0,
1894                     outputs.size() == 1, inputs[0].data != outputs[0].data);
1895 
1896         int ngroups = inputs[0].size[1] / inpGroupCn;
1897         CV_Assert(outputs[0].size[1] % ngroups == 0);
1898 
1899         reluslope.clear();
1900         if( activ )
1901         {
1902             Ptr<ReLULayer> activ_relu = activ.dynamicCast<ReLULayer>();
1903             if( !activ_relu.empty() )
1904             {
1905                 reluslope.assign(outCn+2, activ_relu->negativeSlope);
1906             }
1907 
1908             Ptr<ChannelsPReLULayer> activ_chprelu = activ.dynamicCast<ChannelsPReLULayer>();
1909             if( !activ_chprelu.empty() )
1910             {
1911                 const Mat& m = activ_chprelu->blobs[0];
1912                 CV_Assert(m.isContinuous() && m.type() == CV_32F && (int)m.total() == outCn);
1913                 const float* mdata = m.ptr<float>();
1914                 reluslope.resize(outCn+2);
1915                 std::copy(mdata, mdata + outCn, reluslope.begin());
1916                 reluslope[outCn] = reluslope[outCn+1] = reluslope[outCn-1];
1917             }
1918         }
1919 
1920 #ifdef HAVE_TENGINE
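        // Tengine backend: build the graph lazily on first use, run it, and fall back to the
        // OpenCV ParallelConv path below if graph creation or execution fails.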
1921         bool tengine_ret = false;
1922 
1923         std::vector<Mat> teng_in, teng_out;
1924         inputs_arr.getMatVector(teng_in);
1925         outputs_arr.getMatVector(teng_out);
1926 
1927         int inch = teng_in[0].size[1];    // inch
1928         int in_h = teng_in[0].size[2];    // in_h
1929         int in_w = teng_in[0].size[3];    // in_w
1930 
1931         int out_b = teng_out[0].size[0];  // out batch size
1932         int outch = teng_out[0].size[1];  // outch
1933         int out_h = teng_out[0].size[2];  // out_h
1934         int out_w = teng_out[0].size[3];  // out_w
1935 
1936         float *input_  = teng_in[0].ptr<float>();
1937         float *output_ = teng_out[0].ptr<float>();
1938         float *kernel_ = weightsMat.ptr<float>();
1939         float *teg_bias = &biasvec[0];
1940 
1941         int nstripes = std::max(getNumThreads(), 1);
1942 
1943         /* tengine_init runs only the first time this layer is executed. */
1944         if(NULL == tengine_graph)
1945         {
1946             tengine_graph = tengine_init(name.c_str(), input_, inch, ngroups, in_h, in_w,
1947                                          output_, out_b, outch, out_h, out_w,
1948                                          kernel_, kernel_size.size(), kernel.height, kernel.width,
1949                                          teg_bias, stride.height, stride.width,
1950                                          pad.height,  pad.width, dilation.height, dilation.width,
1951                                          weightsMat.step1(), padMode, tengine_graph, nstripes);
1952             /*printf("Init(%s):  input=%p(%d %d %d %d ),output=%p(%d %d %d %d ),kernel=%p(%ld %d %d ), bias=%p ,"
1953                    "stride(%d %d), pad(%d %d), dilation(%d %d) ,weightsMat=%ld, padMode=%s ,tengine_graph = %p \n",
1954                    name.c_str(),input_, inch, ngroups, in_h, in_w,
1955                    output_, out_b, outch, out_h, out_w,
1956                    kernel_, kernel_size.size(), kernel.height, kernel.width,
1957                    teg_bias, stride.height, stride.width,
1958                    pad.height,  pad.width, dilation.height, dilation.width,
1959                    weightsMat.step1(), padMode.c_str() ,tengine_graph);*/
1960         }
1961         if(NULL != tengine_graph)
1962         {
1963             tengine_ret = tengine_forward(tengine_graph);
1964         }
1965         /* activation */
1966         if((true == tengine_ret) && activ )
1967         {
1968             int out_cstep = out_h * out_w;	    // out_cstep
1969 
1970             ActivationLayer* activ_ = activ.get();
1971             activ_->forwardSlice(output_, output_, out_cstep, out_cstep, 0, outch);
1972         }
1973         if(false == tengine_ret)
1974 #endif
1975         {
1976             int nstripes = std::max(getNumThreads(), 1);
1977 
1978             ParallelConv::run(inputs[0], outputs[0], weightsMat, biasvec, reluslope,
1979                             kernel_size, strides, pads_begin, pads_end, dilations, activ.get(), ngroups, nstripes);
1980         }
1981 #if CV_SSE3
1982         _MM_SET_FLUSH_ZERO_MODE(ftzMode);
1983         _MM_SET_DENORMALS_ZERO_MODE(dazMode);
1984 #endif
1985     }
1986 
1987 #ifdef HAVE_CUDA
1988     Ptr<BackendNode> initCUDA(
1989         void *context_,
1990         const std::vector<Ptr<BackendWrapper>>& inputs,
1991         const std::vector<Ptr<BackendWrapper>>& outputs
1992     ) override
1993     {
1994         auto context = reinterpret_cast<csl::CSLContext*>(context_);
1995 
1996         CV_Assert(inputs.size() == 1 || inputs.size() == 2);
1997         auto input_wrapper = inputs[0].dynamicCast<CUDABackendWrapper>();
1998         auto input_shape = input_wrapper->getShape();
1999 
2000         CV_Assert(outputs.size() == 1);
2001         auto output_wrapper = outputs[0].dynamicCast<CUDABackendWrapper>();
2002         auto output_shape = output_wrapper->getShape();
2003 
2004         const auto output_feature_maps = blobs[0].size[0];
2005         const auto input_feature_maps = input_shape[1];
2006         const auto input_feature_maps_per_group = blobs[0].size[1];
2007         const auto groups = input_feature_maps / input_feature_maps_per_group;
2008 
2009         ConvolutionConfiguration config;
2010 
2011         if (input_shape.size() == 3)
2012         {
2013             // Conv1D
2014             // We add an extra dim for input and output tensors, because cuDNN doesn't support convolution with 3D tensors
2015             input_shape.insert(std::end(input_shape) - 1, 1);
2016             output_shape.insert(std::end(output_shape) - 1, 1);
2017 
2018             // Do the same for the other convolution parameters
2019             pads_begin.insert(std::begin(pads_begin), 0);
2020             pads_end.insert(std::begin(pads_end), 0);
2021             strides.insert(std::begin(strides), 1);
2022             dilations.insert(std::begin(dilations), 1);
2023             kernel_size.insert(std::begin(kernel_size), 1);
2024         }
2025         config.kernel_size.assign(std::begin(kernel_size), std::end(kernel_size));
2026         config.dilations.assign(std::begin(dilations), std::end(dilations));
2027         config.strides.assign(std::begin(strides), std::end(strides));
2028 
2029         if (padMode.empty())
2030         {
2031             config.padMode = ConvolutionConfiguration::PaddingMode::MANUAL;
2032             config.pads_begin.assign(std::begin(pads_begin), std::end(pads_begin));
2033             config.pads_end.assign(std::begin(pads_end), std::end(pads_end));
2034         }
2035         else if (padMode == "VALID")
2036         {
2037             config.padMode = ConvolutionConfiguration::PaddingMode::VALID;
2038         }
2039         else if (padMode == "SAME")
2040         {
2041             config.padMode = ConvolutionConfiguration::PaddingMode::SAME;
2042         }
2043         else
2044         {
2045             CV_Error(Error::StsNotImplemented, padMode + " padding mode not supported by ConvolutionLayer");
2046         }
2047 
2048         config.input_shape.assign(std::begin(input_shape), std::end(input_shape));
2049         config.output_shape.assign(std::begin(output_shape), std::end(output_shape));
2050         config.groups = groups;
2051 
2052         config.fusion_mode = cudaFusionMode;
2053         config.activation_type = cudaActType;
2054         config.relu_negative_slope = cuda_relu_slope;
2055         config.crelu_floor = cuda_crelu_floor;
2056         config.crelu_ceil = cuda_crelu_ceil;
2057         config.power_exp = cuda_power_exp;
2058         config.power_scale = cuda_power_scale;
2059         config.power_shift = cuda_power_shift;
2060 
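        // Use the fused weights/bias when fusion has happened; an all-zero bias is dropped
        // so the CUDA node can skip the bias addition entirely.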
2061         Mat filtersMat = fusedWeights ? weightsMat : blobs[0];
2062         Mat biasMat = (hasBias() || fusedBias) ? Mat(output_feature_maps, 1, CV_32F, biasvec.data()) : Mat();
2063         if (countNonZero(biasMat) == 0)
2064             biasMat = Mat();
2065 
2066         return make_cuda_node<cuda4dnn::ConvolutionOp>(
2067             preferableTarget, std::move(context->stream), std::move(context->cudnn_handle), config, filtersMat, biasMat);
2068     }
2069 #endif
2070 
2071     virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
2072                            const std::vector<MatShape> &outputs) const CV_OVERRIDE
2073     {
2074         CV_Assert(inputs.size() == outputs.size() || inputs.size() == outputs.size() + blobs.size());
2075 
2076         int64 flops = 0;
2077         int karea = std::accumulate(kernel_size.begin(), kernel_size.end(), 1, std::multiplies<size_t>());
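        // Each output element costs roughly karea*inpCn multiply-adds (counted as 2 FLOPs each)
        // plus one addition for the bias, hence 2*karea*inputs[i][1] + 1 per element.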
2078         for (int i = 0; i < outputs.size(); i++)
2079         {
2080             flops += total(outputs[i])*(CV_BIG_INT(2)*karea*inputs[i][1] + 1);
2081         }
2082 
2083         return flops;
2084     }
2085 };
2086 
2087 class DeConvolutionLayerImpl CV_FINAL : public BaseConvolutionLayerImpl
2088 {
2089 public:
2090     Mat weightsMat, biasesMat;
2091     UMat umat_weights;
2092     UMat umat_biases;
2093 
2094     DeConvolutionLayerImpl(const LayerParams& params) : BaseConvolutionLayerImpl(params) {}
2095 
2096     MatShape computeColRowShape(const MatShape &inpShape, const MatShape &outShape) const CV_OVERRIDE
2097     {
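        // The intermediate "col" buffer for deconvolution has one column per input spatial
        // location and outGroupCn * prod(kernel_size) rows.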
2098         int dims = inpShape.size();
2099         int inpCn = inpShape[1];
2100         int inpD = dims == 5 ? inpShape[2] : 1;
2101         int inpH = inpShape[dims - 2];
2102         int inpW = inpShape.back();
2103         int outCn = outShape[1];
2104         int ngroups = inpCn / blobs[0].size[0];
2105         int outGroupCn = outCn / ngroups;
2106         int ksize = outGroupCn * std::accumulate(kernel_size.begin(), kernel_size.end(),
2107                                                  1, std::multiplies<size_t>());
2108         return shape(ksize, inpD * inpH * inpW);
2109     }
2110 
2111     virtual bool supportBackend(int backendId) CV_OVERRIDE
2112     {
2113         if (backendId == DNN_BACKEND_CUDA)
2114         {
2115             /* only 2D and 3D deconvolution are supported */
2116             if (kernel_size.size() == 2 || kernel_size.size() == 3)
2117                 return true;
2118 
2119             return false;
2120         }
2121 
2122 #ifdef HAVE_INF_ENGINE
2123         const int outGroupCn = blobs[0].size[1];  // Weights are in IOHW or IODHW layout
2124         const int group = numOutput / outGroupCn;
2125 
2126         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH) {
2127             return group == 1;
2128         }
2129 
2130 #ifdef HAVE_DNN_IE_NN_BUILDER_2019
2131         if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019)
2132         {
2133             if (kernel_size.size() == 3 && preferableTarget != DNN_TARGET_CPU) {
2134                 return false;
2135             }
2136 
2137             if (std::accumulate(adjust_pads.begin(), adjust_pads.end(), 0, std::plus<size_t>()) > 0)
2138             {
2139                 if (padMode.empty())
2140                 {
2141                     if (preferableTarget != DNN_TARGET_CPU && group != 1)
2142                     {
2143                         for (int i = 0; i < adjust_pads.size(); i++) {
2144                             if (adjust_pads[i] && pads_begin[i])
2145                                 return false;
2146                         }
2147                     }
2148                     for (int i = 0; i < adjust_pads.size(); i++) {
2149                         if (pads_end[i] < adjust_pads[i])
2150                             return false;
2151                     }
2152                     return true;
2153                 }
2154                 else if (padMode == "SAME")
2155                 {
2156                     for (int i = 0; i < adjust_pads.size(); i++) {
2157                         if (kernel_size[i] < pads_begin[i] + 1 + adjust_pads[i])
2158                             return false;
2159                     }
2160                     return true;
2161                 }
2162                 else if (padMode == "VALID")
2163                     return false;
2164             }
2165 
2166             if (group != 1)
2167             {
2168                 return preferableTarget == DNN_TARGET_CPU;
2169             }
2170             if (preferableTarget == DNN_TARGET_OPENCL || preferableTarget == DNN_TARGET_OPENCL_FP16)
2171                 return std::accumulate(dilations.begin(), dilations.end(), 1, std::multiplies<size_t>()) == 1;
2172             return true;
2173         }
2174 #endif  // HAVE_DNN_IE_NN_BUILDER_2019
2175 #endif  // HAVE_INF_ENGINE
2176         {
2177             return backendId == DNN_BACKEND_CUDA ||
2178             (kernel_size.size() == 2 && (backendId == DNN_BACKEND_OPENCV || backendId == DNN_BACKEND_HALIDE));
2179         }
2180     }
2181 
2182     bool getMemoryShapes(const std::vector<MatShape> &inputs,
2183                          const int requiredOutputs,
2184                          std::vector<MatShape> &outputs,
2185                          std::vector<MatShape> &internals) const CV_OVERRIDE
2186     {
2187         CV_Assert(!hasBias() || blobs[1].total() == (size_t)numOutput);
2188         CV_Assert(inputs.size() != 0);
2189 
2190         int outCn = numOutput;
2191         std::vector<int> outShape;
2192         outShape.push_back(inputs[0][0]);  // batch
2193         outShape.push_back(outCn);
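        // Transposed-convolution output size for explicit padding:
        //   stride*(in - 1) + kernel - pads_begin - pads_end + adjust_pads
        // (the VALID and SAME variants are computed below).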
2194         if (padMode.empty())
2195         {
2196             for (int i = 0; i < kernel_size.size(); i++)
2197                 outShape.push_back(strides[i] * (inputs[0][2 + i] - 1) + kernel_size[i] - pads_begin[i] - pads_end[i] + adjust_pads[i]);
2198         }
2199         else if (padMode == "VALID")
2200         {
2201             for (int i = 0; i < kernel_size.size(); i++)
2202                 outShape.push_back(strides[i] * (inputs[0][2 + i] - 1) + kernel_size[i] + adjust_pads[i]);
2203         }
2204         else if (padMode == "SAME")
2205         {
2206             for (int i = 0; i < kernel_size.size(); i++)
2207                 outShape.push_back(strides[i] * (inputs[0][2 + i] - 1) + 1 + adjust_pads[i]);
2208         }
2209         else
2210             CV_Error(Error::StsError, "Unsupported padding mode " + padMode);
2211 
2212         CV_Assert(outCn % blobs[0].size[1] == 0);
2213         int ngroups = outCn / blobs[0].size[1];
2214 
2215         int inpCn = inputs[0][1];
2216         CV_Assert(inpCn % ngroups == 0 && outCn % ngroups == 0);
2217         CV_Assert(blobs[0].size[0] == inpCn);
2218 
2219         outputs.resize(1, outShape);
2220 
2221         if (!is1x1())
2222             internals.push_back(computeColRowShape(inputs[0], outputs[0]));
2223 
2224         return false;
2225     }
2226 
2227     void finalize(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr) CV_OVERRIDE
2228     {
2229         BaseConvolutionLayerImpl::finalize(inputs_arr, outputs_arr);
2230 
2231         std::vector<Mat> inputs, outputs;
2232         inputs_arr.getMatVector(inputs);
2233         outputs_arr.getMatVector(outputs);
2234 
2235         std::vector<int> inpShape;
2236         std::vector<int> outShape;
2237         for (int i = 2; i < inputs[0].dims; i++) {
2238             inpShape.push_back(inputs[0].size[i]);
2239             outShape.push_back(outputs[0].size[i]);
2240         }
2241         getConvPoolPaddings(outShape, kernel_size, strides, padMode, pads_begin, pads_end);
2242         if (pads_begin.size() == 2) {
2243             for (int i = 0; i < pads_begin.size(); i++) {
2244                 if (pads_begin[i] != pads_end[i])
2245                     CV_Error(Error::StsNotImplemented, "Unsupported asymmetric padding in deconvolution layer");
2246             }
2247             pad = Size(pads_begin[1], pads_begin[0]);
2248         }
2249 
2250         weightsMultipliers.assign(numOutput, 1.0);
2251         if (weightsMat.empty())
2252         {
2253             transpose(blobs[0].reshape(1, blobs[0].size[0]), weightsMat);
2254             biasesMat = hasBias() ? blobs[1].reshape(1, numOutput)
2255                                   : Mat::zeros(numOutput, 1, CV_32F);
2256         }
2257     }
2258 
2259     void fuseWeights(const Mat& w_, const Mat& b_) CV_OVERRIDE
2260     {
2261         Mat w = w_.total() == 1 ? Mat(1, numOutput, CV_32F, Scalar(w_.at<float>(0))) : w_;
2262         Mat b = b_.total() == 1 ? Mat(1, numOutput, CV_32F, Scalar(b_.at<float>(0))) : b_;
2263 
2264         CV_Assert_N(!weightsMat.empty(),
2265                      w.empty() || numOutput == w.total(),
2266                      b.empty() || numOutput == b.total());
2267 
2268         if (!w.empty())
2269         {
2270             transpose(blobs[0].reshape(1, blobs[0].size[0]), weightsMat);
2271             weightsMat = weightsMat.reshape(1, numOutput);
2272             for (int i = 0; i < numOutput; ++i)
2273             {
2274                 double wi = w.at<float>(i);
2275                 weightsMultipliers[i] *= wi;
2276                 cv::multiply(weightsMat.row(i), weightsMultipliers[i], weightsMat.row(i));
2277                 biasesMat.at<float>(i) *= wi;
2278             }
2279             weightsMat = weightsMat.reshape(1, weightsMat.total() / blobs[0].size[0]);
2280         }
2281 
2282         if (!b.empty())
2283         {
2284             cv::add(biasesMat, b.reshape(1, numOutput), biasesMat);
2285         }
2286     }
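
    /* Summary of fuseWeights(), added for readability: a per-output-channel affine
     * transform (scale w, shift b), e.g. coming from a folded BatchNorm/Scale layer,
     * is applied directly to the cached parameters:
     *     W'[c]    = (accumulated scale)[c] * W_original[c],
     *     bias'[c] = w[c] * bias[c] + b[c],
     * where the accumulated scale is tracked in weightsMultipliers and the weights are
     * re-derived from blobs[0] on every call, so repeated fusions stay consistent.
     */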

    class MatMulInvoker : public ParallelLoopBody
    {
    public:
        MatMulInvoker(const Mat& a, const Mat& b, Mat& c, int nstripes)
        {
            a_ = &a;
            b_ = &b;
            c_ = &c;
            nstripes_ = nstripes;
            useAVX = checkHardwareSupport(CPU_AVX);
            useAVX2 = checkHardwareSupport(CPU_AVX2);
            useAVX512 = CV_CPU_HAS_SUPPORT_AVX512_SKX;
        }

        void operator()(const Range& range_) const CV_OVERRIDE
        {
            int stripeSize = (int)alignSize((b_->cols + nstripes_ - 1)/nstripes_, 16);
            Range range(range_.start*stripeSize, std::min(range_.end*stripeSize, b_->cols));
            int mmax = a_->rows;
            int nmax = range.end - range.start;
            int kmax = a_->cols;
            int m, n, k;
            const float* aptr = a_->ptr<float>();
            const float* bptr = b_->ptr<float>() + range.start;
            float* cptr = c_->ptr<float>() + range.start;
            size_t astep = a_->step1();
            size_t bstep = b_->step1();
            size_t cstep = c_->step1();

        #if CV_TRY_AVX512_SKX
            if( useAVX512 )
                opt_AVX512_SKX::fastGEMM( aptr, astep, bptr, bstep, cptr, cstep, mmax, kmax, nmax );
            else
        #endif
        #if CV_TRY_AVX2
            if( useAVX2 )
                opt_AVX2::fastGEMM( aptr, astep, bptr, bstep, cptr, cstep, mmax, kmax, nmax );
            else
        #endif
        #if CV_TRY_AVX
            if( useAVX )
                opt_AVX::fastGEMM( aptr, astep, bptr, bstep, cptr, cstep, mmax, kmax, nmax );
            else
        #endif
            for( m = 0; m < mmax; m += 2 )
            {
                float* dst0 = cptr + cstep*m;
                float* dst1 = cptr + cstep*std::min(m+1, mmax-1);
                const float* aptr0 = aptr + astep*m;
                const float* aptr1 = aptr + astep*std::min(m+1, mmax-1);

                for( n = 0; n < nmax; n++ )
                {
                    dst0[n] = 0.f;
                    dst1[n] = 0.f;
                }

                for( k = 0; k < kmax; k += 4 )
                {
                    float alpha00 = aptr0[k];
                    float alpha01 = aptr1[k];
                    float alpha10 = 0.f, alpha11 = 0.f;
                    float alpha20 = 0.f, alpha21 = 0.f;
                    float alpha30 = 0.f, alpha31 = 0.f;
                    const float* bptr0 = bptr + k*bstep;
                    const float* bptr1 = bptr0;
                    const float* bptr2 = bptr0;
                    const float* bptr3 = bptr0;

                    if( k+1 < kmax )
                    {
                        alpha10 = aptr0[k+1];
                        alpha11 = aptr1[k+1];
                        bptr1 = bptr0 + bstep;
                        if( k+2 < kmax )
                        {
                            alpha20 = aptr0[k+2];
                            alpha21 = aptr1[k+2];
                            bptr2 = bptr1 + bstep;
                            if( k+3 < kmax )
                            {
                                alpha30 = aptr0[k+3];
                                alpha31 = aptr1[k+3];
                                bptr3 = bptr2 + bstep;
                            }
                        }
                    }
                    n = 0;

                #if CV_SIMD128
                    v_float32x4 a00 = v_setall_f32(alpha00);
                    v_float32x4 a01 = v_setall_f32(alpha01);
                    v_float32x4 a10 = v_setall_f32(alpha10);
                    v_float32x4 a11 = v_setall_f32(alpha11);
                    v_float32x4 a20 = v_setall_f32(alpha20);
                    v_float32x4 a21 = v_setall_f32(alpha21);
                    v_float32x4 a30 = v_setall_f32(alpha30);
                    v_float32x4 a31 = v_setall_f32(alpha31);

                    for( ; n <= nmax - 4; n += 4 )
                    {
                        v_float32x4 d0 = v_load(dst0 + n);
                        v_float32x4 d1 = v_load(dst1 + n);
                        v_float32x4 b0 = v_load(bptr0 + n);
                        v_float32x4 b1 = v_load(bptr1 + n);
                        v_float32x4 b2 = v_load(bptr2 + n);
                        v_float32x4 b3 = v_load(bptr3 + n);
                        // TODO try to improve pipeline width
                        d0 = v_fma(b0, a00, d0);
                        d1 = v_fma(b0, a01, d1);
                        d0 = v_fma(b1, a10, d0);
                        d1 = v_fma(b1, a11, d1);
                        d0 = v_fma(b2, a20, d0);
                        d1 = v_fma(b2, a21, d1);
                        d0 = v_fma(b3, a30, d0);
                        d1 = v_fma(b3, a31, d1);
                        v_store(dst0 + n, d0);
                        v_store(dst1 + n, d1);
                    }
                #endif

                    for( ; n < nmax; n++ )
                    {
                        float b0 = bptr0[n];
                        float b1 = bptr1[n];
                        float b2 = bptr2[n];
                        float b3 = bptr3[n];
                        float d0 = dst0[n] + alpha00*b0 + alpha10*b1 + alpha20*b2 + alpha30*b3;
                        float d1 = dst1[n] + alpha01*b0 + alpha11*b1 + alpha21*b2 + alpha31*b3;
                        dst0[n] = d0;
                        dst1[n] = d1;
                    }
                }
            }
        }

        const Mat *a_, *b_;
        Mat* c_;
        int nstripes_;
        bool useAVX;
        bool useAVX2;
        bool useAVX512;
    };
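
    /* Implementation note (summary of MatMulInvoker above): the columns of B are split
     * into nstripes stripes that are processed in parallel. The scalar fallback is a
     * small register-blocked GEMM kernel: two output rows (m, m+1) and four k-terms per
     * iteration, with the CV_SIMD128 branch vectorizing over n in steps of 4; odd edges
     * are handled by clamping the second-row pointer and zeroing the unused k
     * coefficients. When available, the AVX/AVX2/AVX-512 opt_*::fastGEMM kernels replace
     * the fallback entirely.
     */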

    class Col2ImInvoker : public cv::ParallelLoopBody
    {
    public:
        const float* data_col;
        const float* biasvec;
        int channels, height, width;
        int kernel_h, kernel_w;
        int pad_h, pad_w;
        int stride_h, stride_w;
        float* data_im;
        int height_col, width_col;
        int nstripes;
        bool is1x1;

        Col2ImInvoker()
            : data_col(0), biasvec(0), channels(0), height(0), width(0),
              kernel_h(0), kernel_w(0), pad_h(0), pad_w(0), stride_h(0), stride_w(0), data_im(0),
              height_col(0), width_col(0), nstripes(0), is1x1(0)
        {}

        static void run(const float* data_col,
                        int channels, int height, int width,
                        int kernel_h, int kernel_w,
                        int pad_h, int pad_w,
                        int stride_h, int stride_w,
                        int height_col, int width_col,
                        float* data_im,
                        const float* biasvec,
                        bool is1x1)
        {
            const int nstripes = getNumThreads();

            Col2ImInvoker t;
            t.data_col = data_col;
            t.data_im = data_im;
            t.channels = channels; t.height = height; t.width = width;
            t.kernel_h = kernel_h; t.kernel_w = kernel_w;
            t.pad_h = pad_h; t.pad_w = pad_w;
            t.stride_h = stride_h; t.stride_w = stride_w;
            t.height_col = height_col;
            t.width_col = width_col;
            t.nstripes = nstripes;
            t.is1x1 = is1x1;
            t.biasvec = biasvec;

            parallel_for_(Range(0, nstripes), t, nstripes);
        }

        virtual void operator ()(const Range &r) const CV_OVERRIDE
        {
            const float* data_col_ = data_col;
            float* data_im_ = data_im;
            int coeff_h = (1 - stride_h * kernel_w * height_col) * width_col;
            int coeff_w = (1 - stride_w * height_col * width_col);
            size_t total = (size_t)channels * height * width;
            size_t stripeSize = (total + nstripes - 1)/nstripes;
            size_t startIndex = r.start*stripeSize;
            size_t endIndex = std::min(r.end*stripeSize, total);
            int w = (int)(startIndex % width + pad_w);
            int h = (int)((startIndex / width) % height + pad_h);
            int c = (int)(startIndex / (width * height));
            int h_col_start = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
            int h_col_end = std::min(h / stride_h + 1, height_col);
            int plane_size_col = height_col * width_col;
            int offset = (c * kernel_h * kernel_w + h * kernel_w + w) * plane_size_col;
            bool is1x1_ = is1x1;
            const float* biasvec_ = biasvec;

            for (size_t index = startIndex; index < endIndex; index++)
            {
                // compute the start and end of the output
                int w_col_start = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
                int w_col_end = std::min(w / stride_w + 1, width_col);
                float val;

                if( is1x1_ )
                    val = data_im_[index];
                else
                {
                    val = 0.f;
                    for (int h_col = h_col_start; h_col < h_col_end; ++h_col) {
                        for (int w_col = w_col_start; w_col < w_col_end; ++w_col) {
                            val += data_col_[offset + h_col * coeff_h + w_col * coeff_w];
                        }
                    }
                }
                data_im_[index] = val + biasvec_[c];

                offset += plane_size_col;
                if( ++w >= width + pad_w )
                {
                    w = (int)((index + 1)% width + pad_w);
                    h = (int)(((index + 1) / width) % height + pad_h);
                    c = (int)((index + 1) / (width * height));
                    h_col_start = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
                    h_col_end = std::min(h / stride_h + 1, height_col);
                    offset = (c * kernel_h * kernel_w + h * kernel_w + w) * plane_size_col;
                }
            }
        }
    };
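
    /* Summary of Col2ImInvoker, added for readability: this is the col2im step of the
     * deconvolution. For every output element (c, y, x) it accumulates all entries of
     * the column buffer that a forward im2col with the same kernel/stride/padding would
     * have copied that element into, then adds biasvec[c]; coeff_h and coeff_w are
     * precomputed index deltas for walking the column buffer without recomputing full
     * offsets. In the 1x1 special case the column buffer already aliases the output, so
     * only the bias is added. Work is split into getNumThreads() stripes over the
     * flattened output.
     */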

#ifdef HAVE_OPENCL
    bool forward_ocl(InputArrayOfArrays inputs_, OutputArrayOfArrays outputs_, OutputArrayOfArrays internals_)
    {
        std::vector<UMat> inputs;
        std::vector<UMat> outputs;
        std::vector<UMat> internals;

        if (inputs_.depth() == CV_16S)
            return false;

        inputs_.getUMatVector(inputs);
        outputs_.getUMatVector(outputs);
        internals_.getUMatVector(internals);

        int outCn = numOutput;
        int inpCn = inputs[0].size[1];

        if (is1x1())
            return false;

        if (umat_weights.empty())
        {
            if (fusedWeights)
                weightsMat.copyTo(umat_weights);
            else
                transpose(blobs[0].reshape(1, inpCn), umat_weights);

            if (fusedBias)
                biasesMat.copyTo(umat_biases);
            else
            {
                if (hasBias())
                    blobs[1].reshape(1, outCn).copyTo(umat_biases);
                else
                    umat_biases = UMat::zeros(outCn, 1, CV_32F);
            }
        }

        String buildopt = format("-DT=%s ", ocl::typeToStr(inputs[0].type()));
        buildopt += format("-DPAD_H=%d -DPAD_W=%d -DKERNEL_H=%d -DKERNEL_W=%d -DSTRIDE_H=%d -DSTRIDE_W=%d ",
                           pad.height, pad.width, kernel.height, kernel.width, stride.height, stride.width);

        for (size_t ii = 0; ii < outputs.size(); ii++)
        {
            int ngroups = outCn / blobs[0].size[1];
            int inpGroupCn = inpCn / ngroups;
            int outGroupCn = blobs[0].size[1];
            const UMat& inp = inputs[ii];
            UMat& out = outputs[ii];
            int numImg = inp.size[0];
            int inpH = inp.size[2], inpW = inp.size[3];
            int outH = out.size[2], outW = out.size[3];

            MatShape inpshape = shape(numImg*inpCn, inpH*inpW);
            MatShape outshape = shape(numImg*outCn, outH*outW);
            UMat convBlob = inputs[ii].reshape(1, inpshape.size(), &inpshape[0]);
            UMat decnBlob = out.reshape(1, outshape.size(), &outshape[0]);
            int rows = internals[0].rows / ngroups;

            for (int n = 0; n < numImg; n++)
            {
                for (int g = 0; g < ngroups; g++)
                {
                    UMat colMat = internals[0].rowRange(_Range(g * rows, rows));
                    UMat convMat = convBlob.rowRange(_Range((g + n * ngroups) * inpGroupCn, inpGroupCn));
                    UMat wghtMat = umat_weights.colRange(_Range(g * inpGroupCn, inpGroupCn));
                    gemm(wghtMat, convMat, 1, noArray(), 0, colMat, 0);
                }

                for (int g = 0; g < ngroups; g++)
                {
                    int total = outGroupCn * decnBlob.cols;
                    int index = 0;
                    int height_col = inpH;
                    int width_col = inpW;
                    int coeff_h = (1 - stride.height * kernel.width * height_col) * width_col;
                    int coeff_w = (1 - stride.width * height_col * width_col);

                    ocl::Kernel k("col2im", ocl::dnn::col2im_oclsrc, buildopt);
                    k.set(index++, total);
                    k.set(index++, ocl::KernelArg::PtrReadOnly(internals[0]));
                    k.set(index++, (int)(g * rows * internals[0].cols));
                    k.set(index++, outGroupCn);
                    k.set(index++, outH);
                    k.set(index++, outW);
                    k.set(index++, height_col);
                    k.set(index++, width_col);
                    k.set(index++, coeff_h);
                    k.set(index++, coeff_w);
                    k.set(index++, ocl::KernelArg::PtrReadOnly(umat_biases));
                    k.set(index++, (int)(g * outGroupCn * umat_biases.cols));
                    k.set(index++, ocl::KernelArg::PtrWriteOnly(decnBlob));
                    k.set(index++, (int)((g + n * ngroups) * outGroupCn * decnBlob.cols));

                    size_t global[] = { (size_t)total };
                    bool ret = k.run(1, global, NULL, false);
                    if (!ret)
                        return false;
                }
            }
        }

        return true;
    }
#endif
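
    /* The OpenCL branch above mirrors the CPU path: for every image and group it runs a
     * GEMM of the transposed weights with the input columns into internals[0] (cv::gemm
     * on UMats), then the "col2im" kernel from col2im_oclsrc scatters the result into
     * the output and adds the bias. It deliberately returns false for CV_16S inputs and
     * for 1x1 kernels, letting forward() handle those cases.
     */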

    void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
    {
        CV_TRACE_FUNCTION();
        CV_TRACE_ARG_VALUE(name, "name", name.c_str());

        CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
                   forward_ocl(inputs_arr, outputs_arr, internals_arr));

        if (inputs_arr.depth() == CV_16S)
        {
            forward_fallback(inputs_arr, outputs_arr, internals_arr);
            return;
        }

        std::vector<Mat> inputs, outputs, internals;
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);
        internals_arr.getMatVector(internals);

        int outCn = numOutput;
        int inpCn = inputs[0].size[1];
        bool is1x1flag = is1x1();
        int nstripes = getNumThreads();

        if( weightsMat.empty() )
        {
            transpose(blobs[0].reshape(1, inpCn), weightsMat);
            biasesMat = hasBias() ? blobs[1].reshape(1, outCn) : Mat::zeros(outCn, 1, CV_32F);
        }

        for (size_t ii = 0; ii < outputs.size(); ii++)
        {
            int ngroups = outCn / blobs[0].size[1];
            int inpGroupCn = inpCn / ngroups;
            int outGroupCn = blobs[0].size[1];
            const Mat& inp = inputs[ii];
            Mat& out = outputs[ii];
            int numImg = inp.size[0];
            int inpH = inp.size[2], inpW = inp.size[3];
            int outH = out.size[2], outW = out.size[3];

            Mat convBlob = inputs[ii].reshape(1, numImg*inpCn);
            Mat decnBlob = out.reshape(1, numImg*outCn);

            for (int n = 0; n < numImg; n++)
            {
                for (int g = 0; g < ngroups; g++)
                {
                    Mat dstMat = decnBlob.rowRange(_Range((g + n * ngroups) * outGroupCn, outGroupCn));
                    Mat &colMat = is1x1flag ? dstMat : internals[0];

                    Mat convMat = convBlob.rowRange(_Range((g + n * ngroups) * inpGroupCn, inpGroupCn));
                    Mat wghtMat = weightsMat.colRange(_Range(g * inpGroupCn, inpGroupCn));
                    Mat curBiasMat = biasesMat.rowRange(_Range(g * outGroupCn, outGroupCn));

                    // Multiply the transposed weights with the input columns: colMat gets one
                    // column per input position, holding all kh*kw*outGroupCn contributions.
                    //gemm(wghtMat, convMat, 1, colMat, 0, colMat, 0);
                    MatMulInvoker mminvoker(wghtMat, convMat, colMat, nstripes);
                    parallel_for_(Range(0, nstripes), mminvoker, nstripes);

                    // Scatter-add the overlapping kernel windows into the output image and add the bias.
                    Col2ImInvoker::run(colMat.ptr<float>(), outGroupCn, outH, outW,
                                       kernel.height, kernel.width, pad.height, pad.width,
                                       stride.height, stride.width, inpH, inpW, dstMat.ptr<float>(),
                                       curBiasMat.ptr<float>(), is1x1flag);
                }
            }
        }
    }
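
    /* Shape bookkeeping for the CPU path above, added as a reading aid (per image n and
     * group g, 2D case):
     *     wghtMat : (outGroupCn*kh*kw) x inpGroupCn   -- slice of the transposed weights
     *     convMat : inpGroupCn x (inpH*inpW)          -- input channels of this group
     *     colMat  : (outGroupCn*kh*kw) x (inpH*inpW)  -- one column per input position
     *     dstMat  : outGroupCn x (outH*outW)          -- output channels of this group
     * MatMulInvoker computes colMat = wghtMat * convMat, and Col2ImInvoker scatters the
     * overlapping kernel windows of colMat into dstMat while adding the bias.
     */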

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(
        void *context_,
        const std::vector<Ptr<BackendWrapper>>& inputs,
        const std::vector<Ptr<BackendWrapper>>& outputs
    ) override
    {
        auto context = reinterpret_cast<csl::CSLContext*>(context_);

        CV_Assert(inputs.size() == 1);
        auto input_wrapper = inputs[0].dynamicCast<CUDABackendWrapper>();
        auto input_shape = input_wrapper->getShape();

        CV_Assert(outputs.size() == 1);
        auto output_wrapper = outputs[0].dynamicCast<CUDABackendWrapper>();
        auto output_shape = output_wrapper->getShape();

        const auto output_feature_maps = numOutput;
        const auto output_feature_maps_per_group = blobs[0].size[1];
        const auto groups = output_feature_maps / output_feature_maps_per_group;

        TransposeConvolutionConfiguration config;
        config.kernel_size.assign(std::begin(kernel_size), std::end(kernel_size));
        config.dilations.assign(std::begin(dilations), std::end(dilations));
        config.strides.assign(std::begin(strides), std::end(strides));

        if (padMode.empty())
        {
            config.padMode = TransposeConvolutionConfiguration::PaddingMode::MANUAL;
            config.pads_begin.assign(std::begin(pads_begin), std::end(pads_begin));
            config.pads_end.assign(std::begin(pads_end), std::end(pads_end));
        }
        else if (padMode == "VALID")
        {
            config.padMode = TransposeConvolutionConfiguration::PaddingMode::VALID;
        }
        else if (padMode == "SAME")
        {
            config.padMode = TransposeConvolutionConfiguration::PaddingMode::SAME;
        }
        else
        {
            CV_Error(Error::StsNotImplemented, padMode + " padding mode not supported by DeconvolutionLayer");
        }

        config.input_shape.assign(std::begin(input_shape), std::end(input_shape));
        config.output_shape.assign(std::begin(output_shape), std::end(output_shape));
        config.groups = groups;

        CV_Assert(blobs.size() >= 1);
        Mat filtersMat = fusedWeights ? weightsMat.t() : blobs[0];

        Mat biasMat = (hasBias() || fusedBias) ? biasesMat : Mat();
        if (countNonZero(biasMat) == 0)
            biasMat = Mat();

        return make_cuda_node<cuda4dnn::TransposeConvolutionOp>(
            preferableTarget, std::move(context->stream), std::move(context->cudnn_handle), config, filtersMat, biasMat);
    }
#endif

    virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
    {
#ifdef HAVE_HALIDE
        Halide::Buffer<float> inputBuffer = halideBuffer(inputs[0]);

        int inW, inH, inC, inN;
        getCanonicalSize(inputBuffer, &inW, &inH, &inC, &inN);
        const int outGroupCn = blobs[0].size[1];
        const int group = numOutput / outGroupCn;
        const int inpGroupCn = blobs[0].size[0] / group;

        Halide::Var x("x"), y("y"), c("c"), n("n");
        Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
        Halide::Func padded_input(name + "_constant_exterior");
        auto weights = wrapToHalideBuffer(blobs[0]);

        Halide::Func dilated_input("dilated_input");
        dilated_input(x, y, c, n) = 0.0f;
        Halide::RDom r1(0, inW, 0, inH);
        dilated_input(r1.x * stride.width, r1.y * stride.height, c, n) =
              inputBuffer(r1.x, r1.y, c, n);
        dilated_input.compute_root();

        Halide::Func bounded =
            Halide::BoundaryConditions::constant_exterior(dilated_input, 0,
                                                          0, (inW - 1) * stride.width + 1,
                                                          0, (inH - 1) * stride.height + 1,
                                                          0, inC, 0, inN);
        padded_input(x, y, c, n) = bounded(x, y, c, n);

        Halide::RDom r(0, kernel.width, 0, kernel.height, 0, inpGroupCn);
        Halide::Expr kx = x + pad.width - r.x;
        Halide::Expr ky = y + pad.height - r.y;
        Halide::Expr kInC = r.z;
        Halide::Expr kOutC = c;
        for (int i = 1; i < group; ++i)
        {
            kInC = select(c < outGroupCn * i, kInC, inpGroupCn * i + r.z);
            kOutC = select(c < outGroupCn * i, kOutC, c - outGroupCn * i);
        }
        Halide::Expr topExpr = sum(padded_input(kx, ky, kInC, n) *
                                   weights(r.x, r.y, kOutC, kInC));
        if (hasBias())
        {
            auto bias = wrapToHalideBuffer(blobs[1], {numOutput});
            topExpr += bias(c);
        }
        top(x, y, c, n) = topExpr;
        return Ptr<BackendNode>(new HalideBackendNode({ padded_input, top }));
#endif  // HAVE_HALIDE
        return Ptr<BackendNode>();
    }

#ifdef HAVE_DNN_IE_NN_BUILDER_2019
    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> > &) CV_OVERRIDE
    {
        InferenceEngine::Layout layout = blobs[0].dims == 5? InferenceEngine::Layout::NCDHW :
                                                             InferenceEngine::Layout::OIHW;

        auto ieWeights = wrapToInfEngineBlob(blobs[0], layout);
        if (fusedWeights)
        {
            ieWeights = InferenceEngine::make_shared_blob<float>({
                            InferenceEngine::Precision::FP32,
                            ieWeights->getTensorDesc().getDims(), layout
                        });
            ieWeights->allocate();

            int inpCn = blobs[0].size[0];
            Mat newWeights = infEngineBlobToMat(ieWeights).reshape(1, inpCn);
            transpose(weightsMat, newWeights);
        }

        const int outGroupCn = blobs[0].size[1];  // Weights are in IOHW or OIDHW layout
        const int group = numOutput / outGroupCn;

        InferenceEngine::Builder::DeconvolutionLayer ieLayer(name);

        ieLayer.setKernel(kernel_size);
        ieLayer.setStrides(strides);
        ieLayer.setDilation(dilations);
        ieLayer.setPaddingsBegin(pads_begin);

        if (padMode.empty())
        {
            std::vector<size_t> paddings_end;
            for (int i = 0; i < pads_end.size(); i++) {
                paddings_end.push_back(pads_end[i] - adjust_pads[i]);
            }
            ieLayer.setPaddingsEnd(paddings_end);
        }
        else if (padMode == "SAME")
        {
            std::vector<size_t> paddings_end;
            for (int i = 0; i < pads_begin.size(); i++) {
                paddings_end.push_back(kernel_size[i] - pads_begin[i] - 1 - adjust_pads[i]);
            }
            ieLayer.setPaddingsEnd(paddings_end);
        }
        ieLayer.setGroup((size_t)group);
        ieLayer.setOutDepth((size_t)numOutput);

        InferenceEngine::Builder::Layer l = ieLayer;
        addConstantData("weights", ieWeights, l);
        if (hasBias())
            addConstantData("biases", wrapToInfEngineBlob(biasesMat, {(size_t)numOutput}, InferenceEngine::Layout::C), l);
        return Ptr<BackendNode>(new InfEngineBackendNode(l));
    }
#endif  // HAVE_DNN_IE_NN_BUILDER_2019


#ifdef HAVE_DNN_NGRAPH
    virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> > &inputs,
                                        const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
    {
        const int outGroupCn = blobs[0].size[1];
        const int group = numOutput / outGroupCn;
        CV_Assert(group == 1);

        auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
        std::vector<size_t> kernel_shape = getShape<size_t>(blobs[0]);
        auto ieWeights = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, kernel_shape, blobs[0].data);

        if (fusedWeights)
        {
            Mat newWeights;
            transpose(weightsMat, newWeights);
            ieWeights = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, kernel_shape, newWeights.data);
        }
        std::vector<size_t> paddings_end;
        if (padMode == "SAME")
        {
            for (int i = 0; i < pads_begin.size(); i++) {
                paddings_end.push_back(kernel_size[i] - pads_begin[i] - 1 - adjust_pads[i]);
            }
            adjust_pads = std::vector<size_t>(pads_begin.size(), 0);
        } else {
            paddings_end = pads_end;
        }
        ngraph::op::PadType pad_type = padMode == "VALID" ? ngraph::op::PadType::VALID : ngraph::op::PadType::EXPLICIT;

        auto deconv = std::make_shared<ngraph::op::v1::ConvolutionBackpropData>(
                          ieInpNode,
                          ieWeights,
                          ngraph::Strides(strides),
                          ngraph::CoordinateDiff(std::vector<std::ptrdiff_t>(pads_begin.begin(), pads_begin.end())),
                          ngraph::CoordinateDiff(std::vector<std::ptrdiff_t>(paddings_end.begin(), paddings_end.end())),
                          ngraph::Strides(dilations),
                          pad_type,
                          ngraph::CoordinateDiff(std::vector<std::ptrdiff_t>(adjust_pads.begin(), adjust_pads.end())));

        if (hasBias() || fusedBias)
        {
            std::vector<size_t> shape(deconv->get_shape().size(), 1);
            shape[1] = numOutput;
            auto bias = std::make_shared<ngraph::op::Constant>(ngraph::element::f32, ngraph::Shape(shape), blobs[1].data);
            auto deconv_bias = std::make_shared<ngraph::op::v1::Add>(deconv, bias, ngraph::op::AutoBroadcastType::NUMPY);
            return Ptr<BackendNode>(new InfEngineNgraphNode(deconv_bias));
        }

        return Ptr<BackendNode>(new InfEngineNgraphNode(deconv));
    }
#endif  // HAVE_DNN_NGRAPH

    virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                           const std::vector<MatShape> &outputs) const CV_OVERRIDE
    {
        CV_Assert(inputs.size() == outputs.size());

        float flops = 0;
        int outChannels = blobs[0].size[0];
        size_t karea = std::accumulate(kernel_size.begin(), kernel_size.end(),
                                       1, std::multiplies<size_t>());

        for (int i = 0; i < inputs.size(); i++)
        {
            // Rough estimate: 2 * channels * kernel-area FLOPs (one multiply-add) per input element.
            flops += CV_BIG_INT(2)*outChannels*karea*total(inputs[i]);
        }

        return flops;
    }
};

Ptr<BaseConvolutionLayer> ConvolutionLayer::create(const LayerParams &params)
{
    Ptr<ConvolutionLayerImpl> l(new ConvolutionLayerImpl(params));
    return l;
}

Ptr<BaseConvolutionLayer> DeconvolutionLayer::create(const LayerParams &params)
{
    return Ptr<BaseConvolutionLayer>(new DeConvolutionLayerImpl(params));
}
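
/* Illustrative usage (not part of this translation unit; the parameter values and the
 * `weights` Mat are made up): building a stand-alone deconvolution layer from
 * LayerParams, the way the model importers do. In practice the layer is normally
 * instantiated through dnn::Net rather than directly.
 *
 *     LayerParams lp;
 *     lp.name = "deconv1";
 *     lp.type = "Deconvolution";
 *     lp.set("kernel_size", 4);
 *     lp.set("stride", 2);
 *     lp.set("pad", 1);
 *     lp.set("num_output", 16);
 *     lp.blobs.push_back(weights);  // shape: inpCn x (numOutput/group) x kH x kW
 *     Ptr<BaseConvolutionLayer> deconv = DeconvolutionLayer::create(lp);
 */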

}  // namespace dnn
}  // namespace cv