/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2013, OpenCV Foundation, all rights reserved.
// Copyright (C) 2017, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "../precomp.hpp"
#include "layers_common.hpp"
#include "../op_cuda.hpp"
#include "../op_halide.hpp"
#include "../op_inf_engine.hpp"
#include "../ie_ngraph.hpp"
#include "../op_vkcom.hpp"

#include "opencv2/imgproc.hpp"
#include "opencv2/dnn/shape_utils.hpp"
#include "opencv2/core/hal/hal.hpp"
#include <algorithm>
#include <numeric>  // std::accumulate, std::iota

#ifdef HAVE_OPENCL
#include "opencl_kernels_dnn.hpp"
using namespace cv::dnn::ocl4dnn;
#endif

#ifdef HAVE_CUDA
#include "../cuda4dnn/primitives/lrn.hpp"
using namespace cv::dnn::cuda4dnn;
#endif

namespace cv
{
namespace dnn
{

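// Local Response Normalization (LRN), as introduced by Krizhevsky et al. in
// "ImageNet Classification with Deep Convolutional Neural Networks" (2012):
//
//     dst[i] = src[i] / (bias + alpha' * sum(src[j]^2 for j in window(i)))^beta
//
// where the window of local_size elements runs either across channels
// (ACROSS_CHANNELS) or over a spatial patch within one channel (WITHIN_CHANNEL),
// and alpha' = alpha / window_area when norm_by_size is set, plain alpha otherwise.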
class LRNLayerImpl CV_FINAL : public LRNLayer
{
public:
    LRNLayerImpl(const LayerParams& params)
    {
        setParamsFrom(params);
        type = -1;
        String nrmType = params.get<String>("norm_region", "ACROSS_CHANNELS");
        if (nrmType == "ACROSS_CHANNELS")
            type = CHANNEL_NRM;
        else if (nrmType == "WITHIN_CHANNEL")
            type = SPATIAL_NRM;
        else
            CV_Error(Error::StsBadArg, "Unknown region type \"" + nrmType + "\"");

        size = params.get<int>("local_size", 5);
        if (size % 2 != 1 || size <= 0)
            CV_Error(Error::StsBadArg, "LRN layer supports only positive odd values for local_size");

        alpha = params.get<double>("alpha", 1);
        beta = params.get<double>("beta", 0.75);
        bias = params.get<double>("bias", 1);
        normBySize = params.get<bool>("norm_by_size", true);
    }

#ifdef HAVE_OPENCL
    Ptr<OCL4DNNLRN<float> > lrnOp;
#endif

    virtual bool supportBackend(int backendId) CV_OVERRIDE
    {
        if (backendId == DNN_BACKEND_INFERENCE_ENGINE_NN_BUILDER_2019 ||
            backendId == DNN_BACKEND_INFERENCE_ENGINE_NGRAPH)
        {
            return bias == (int)bias;
        }
        return backendId == DNN_BACKEND_OPENCV ||
               backendId == DNN_BACKEND_CUDA ||
               backendId == DNN_BACKEND_HALIDE ||
               (backendId == DNN_BACKEND_VKCOM && haveVulkan() && (size % 2 == 1) && (type == CHANNEL_NRM));
    }

#ifdef HAVE_OPENCL
    virtual void finalize(InputArrayOfArrays, OutputArrayOfArrays) CV_OVERRIDE
    {
        lrnOp.release();
    }

    bool forward_ocl(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
    {
        std::vector<UMat> inputs;
        std::vector<UMat> outputs;

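        // OpenCV DNN carries FP16 tensors in CV_16S containers, so the depth
        // check below is how the half-precision path is detected.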
        bool use_half = (inps.depth() == CV_16S);
        inps.getUMatVector(inputs);
        outs.getUMatVector(outputs);

        if (lrnOp.empty())
        {
            OCL4DNNLRNConfig config;
            config.lrn_type = type == CHANNEL_NRM ?
                              LRNParameter_NormRegion_ACROSS_CHANNELS :
                              LRNParameter_NormRegion_WITHIN_CHANNEL;

            CHECK_EQ(size % 2, 1) << "LRN only supports odd values for local_size";
            config.local_size = size;
            config.alpha = alpha;
            config.beta = beta;
            config.k = bias;
            CHECK_EQ(4, inputs[0].dims) << "Input must have 4 axes, "
                                        << "corresponding to (num, channels, height, width)";
            config.batch_size = inputs[0].size[0];
            config.channels = inputs[0].size[1];
            config.height = inputs[0].size[2];
            config.width = inputs[0].size[3];
            config.norm_by_size = normBySize;
            config.use_half = use_half;

            lrnOp = Ptr<OCL4DNNLRN<float> >(new OCL4DNNLRN<float>(config));
        }

        return lrnOp->Forward(inputs[0], outputs[0]);
    }
#endif

    void forward(InputArrayOfArrays inputs_arr, OutputArrayOfArrays outputs_arr, OutputArrayOfArrays internals_arr) CV_OVERRIDE
    {
        CV_TRACE_FUNCTION();
        CV_TRACE_ARG_VALUE(name, "name", name.c_str());

        CV_Assert(inputs_arr.total() == outputs_arr.total());

        CV_OCL_RUN(IS_DNN_OPENCL_TARGET(preferableTarget),
                   forward_ocl(inputs_arr, outputs_arr, internals_arr))

        if (inputs_arr.depth() == CV_16S)
        {
            forward_fallback(inputs_arr, outputs_arr, internals_arr);
            return;
        }

        std::vector<Mat> inputs, outputs;
        inputs_arr.getMatVector(inputs);
        outputs_arr.getMatVector(outputs);

        CV_Assert(inputs.size() == outputs.size());

        for (size_t i = 0; i < inputs.size(); i++)
        {
            CV_Assert(inputs[i].dims == 4);

            Mat &src = inputs[i];
            Mat &dst = outputs[i];

            switch (type)
            {
                case CHANNEL_NRM:
                    channelNormalization(src, dst);
                    break;
                case SPATIAL_NRM:
                    spatialNormalization(src, dst);
                    break;
                default:
                    CV_Error(Error::StsNotImplemented, "Unimplemented mode of LRN layer");
                    break;
            }
        }
    }

    class ChannelLRN : public ParallelLoopBody
    {
    public:
        ChannelLRN(const float* src, float* dst, int channels, int ksize,
                   float alpha1, float bias1, float beta1,
                   size_t planeSize, int nsamples, int nstripes)
        {
            src_ = src; dst_ = dst;
            channels_ = channels;
            ksize_ = ksize;
            alpha1_ = alpha1; bias1_ = bias1; beta1_ = beta1;
            planeSize_ = planeSize; nsamples_ = nsamples; nstripes_ = nstripes;
        }

        void operator()(const Range& r) const CV_OVERRIDE
        {
            int nsamples = nsamples_, nstripes = nstripes_;
            size_t planeSize = planeSize_, planeSize_n = planeSize * nsamples;
            size_t elemsPerStripe = (planeSize_n + nstripes - 1)/nstripes;
            size_t rstart = r.start*elemsPerStripe;
            size_t rend = r.end == nstripes ? planeSize_n : r.end*elemsPerStripe;
            rstart = std::min(rstart, planeSize_n);
            rend = std::min(rend, planeSize_n);
            float alpha1 = alpha1_, bias1 = bias1_, beta1 = beta1_;
            int k, channels = channels_, ksize = ksize_;

            AutoBuffer<float> buf_((channels + ksize + 1)*2);
            float* acc = buf_.data();
            float* buf = acc + channels + ksize + 1;
            for( k = 0; k <= ksize; k++ )
                buf[-k-1] = buf[channels + k] = 0.f;

            for( size_t ofs = rstart; ofs < rend; )
            {
                int sampleIdx = (int)(ofs/planeSize);
                if( sampleIdx >= nsamples )
                    break;
                size_t ofs0 = ofs - sampleIdx*planeSize;
                size_t ofs1 = std::min(planeSize - ofs0, rend - ofs) + ofs;
                const float* src = src_ + sampleIdx*planeSize*channels + ofs0;
                float* dst = dst_ + sampleIdx*planeSize*channels + ofs0;

                for( ; ofs < ofs1; ofs++, src++, dst++ )
                {
                    for( k = 0; k < channels; k++ )
                        buf[k] = src[k*planeSize];
                    float s = 0;
                    for( k = 0; k < ksize; k++ )
                        s += buf[k]*buf[k];
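                    // Slide the window along the channel axis. The sum for channel k
                    // is updated incrementally: x1 enters the window, x0 leaves it,
                    // and (x1 + x0)*(x1 - x0) == x1*x1 - x0*x0 with one fewer multiply.
                    // The zero padding on either side of buf makes the boundary cases
                    // free, and max(..., 0) guards against round-off drift below zero.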
                    for( k = 0; k < channels; k++ )
                    {
                        float x1 = buf[k + ksize];
                        float x0 = buf[k - ksize - 1];
                        s = std::max(s + (x1 + x0)*(x1 - x0), 0.f);
                        acc[k] = (float)(alpha1*s + bias1);
                    }

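                    // Raise acc[k] = alpha1*s + bias1 to the power beta1 as
                    // exp(beta1 * log(acc[k])), using vectorized HAL kernels.
                    // beta1 is the negated exponent (-beta), so this yields the
                    // reciprocal of the normalization factor in one pass.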
                    hal::log32f(acc, acc, channels);
                    for( k = 0; k < channels; k++ )
                        acc[k] *= beta1;
                    hal::exp32f(acc, acc, channels);

                    for( k = 0; k < channels; k++ )
                        dst[k*planeSize] = buf[k]*acc[k];
                }
            }
        }

        const float* src_;
        float* dst_;
        float alpha1_, bias1_, beta1_;
        size_t planeSize_;
        int channels_, ksize_, nsamples_, nstripes_;
    };

    void channelNormalization(Mat &srcBlob, Mat &dstBlob)
    {
        int num = srcBlob.size[0];
        int channels = srcBlob.size[1];
        int ksize = (size - 1) / 2;
        int sizeNormFactor = normBySize ? size : 1;
        size_t planeSize = srcBlob.size[2]*srcBlob.size[3];

        int nstripes = std::max(getNumThreads(), 1);

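        // Pass alpha pre-divided by the window size (when norm_by_size is set)
        // and beta negated, so the worker computes src * (alpha1*s + bias)^(-beta)
        // directly.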
        ChannelLRN clrn(srcBlob.ptr<float>(), dstBlob.ptr<float>(), channels,
                        ksize, alpha/sizeNormFactor, bias, -beta, planeSize, num, nstripes);
        parallel_for_(Range(0, nstripes), clrn, nstripes);
    }

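    // Un-normalized sum of squares over a size x size window with zero padding.
    // The raw wrapper re-views the plane's data without copying so that
    // cv::sqrBoxFilter sees an ordinary 2D Mat.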
    void sqrBoxFilter_(const Mat &src, Mat &dst)
    {
        Mat srcRawWrapper(src.rows, src.cols, src.type(), src.data, src.step[0]);
        cv::sqrBoxFilter(srcRawWrapper, dst, dst.depth(), Size(size, size), Point(-1, -1), false, BORDER_CONSTANT);
    }

    void spatialNormalization(Mat &srcBlob, Mat &dstBlob)
    {
        int num = srcBlob.size[0];
        int channels = srcBlob.size[1];
        int sizeNormFactor = normBySize ? size*size : 1;

        Mat srcMat = srcBlob;
        Mat dstMat = dstBlob;

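        // Per plane: dst = src / (bias + alpha/n * boxsum(src^2))^beta, with
        // n = size*size when norm_by_size is set and 1 otherwise; convertTo
        // applies the scale and bias, pow the exponent, divide the final ratio.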
        for (int n = 0; n < num; n++)
        {
            for (int cn = 0; cn < channels; cn++)
            {
                Mat src = getPlane(srcMat, n, cn);
                Mat dst = getPlane(dstMat, n, cn);

                sqrBoxFilter_(src, dst);

                dst.convertTo(dst, dst.type(), alpha/sizeNormFactor, bias);
                cv::pow(dst, beta, dst);
                cv::divide(src, dst, dst);
            }
        }
    }

#ifdef HAVE_CUDA
    Ptr<BackendNode> initCUDA(
        void *context_,
        const std::vector<Ptr<BackendWrapper>>& inputs,
        const std::vector<Ptr<BackendWrapper>>& outputs
    ) override
    {
        auto context = reinterpret_cast<csl::CSLContext*>(context_);

        cuda4dnn::LRNType type_;
        if (type == CHANNEL_NRM)
            type_ = cuda4dnn::LRNType::ACROSS_CHANNELS;
        else if (type == SPATIAL_NRM)
            type_ = cuda4dnn::LRNType::WITHIN_CHANNEL;
        else
            CV_Error(Error::StsNotImplemented, "Unknown normalization region");

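        // The CUDA backend normalizes alpha by the window size internally, so
        // when norm_by_size is off, pre-multiplying by the window size cancels
        // that division.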
        float alphaSize = alpha;
        if (!normBySize) {
            switch (type) {
                case CHANNEL_NRM: alphaSize = alpha * size; break;
                case SPATIAL_NRM: alphaSize = alpha * size * size; break;
            }
        }

        std::size_t largestInputSize = 0;
        for (auto& wrapper : inputs) {
            auto input_wrapper = wrapper.dynamicCast<CUDABackendWrapper>();
            auto shape = input_wrapper->getShape();
            largestInputSize = std::max<std::size_t>(
                largestInputSize,
                std::accumulate(std::begin(shape), std::end(shape), 1, std::multiplies<int>())
            );
        }

        return make_cuda_node<cuda4dnn::LRNOp>(preferableTarget,
            std::move(context->cudnn_handle), type_, size, alphaSize, beta, bias, largestInputSize);
    }
#endif

    virtual Ptr<BackendNode> initVkCom(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
    {
#ifdef HAVE_VULKAN
        std::shared_ptr<vkcom::OpBase> op(new vkcom::OpLRN(size / 2, bias, alpha, beta, normBySize));
        return Ptr<BackendNode>(new VkComBackendNode(inputs, op));
#endif
        return Ptr<BackendNode>();
    }

    virtual Ptr<BackendNode> initHalide(const std::vector<Ptr<BackendWrapper> > &inputs) CV_OVERRIDE
    {
#ifdef HAVE_HALIDE
        float alphaSize = alpha;
        if (normBySize)
            alphaSize /= (type == CHANNEL_NRM ? size : size * size);
        int width, height, channels, numImgs;
        Halide::Buffer<float> inputBuffer = halideBuffer(inputs[0]);
        getCanonicalSize(inputBuffer, &width, &height, &channels, &numImgs);

        Halide::Var x("x"), y("y"), c("c"), n("n");
        Halide::Func top = (name.empty() ? Halide::Func() : Halide::Func(name));
        Halide::Func padded_sq(name + "_padded_sq");
        Halide::Func sq("sq");
        sq(x, y, c, n) = inputBuffer(x, y, c, n) * inputBuffer(x, y, c, n);

        Halide::Func bounded =
            Halide::BoundaryConditions::constant_exterior(sq, 0, 0, width,
                                                          0, height,
                                                          0, channels,
                                                          0, numImgs);
        padded_sq(x, y, c, n) = bounded(x, y, c, n);

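        // Reduce the squared, zero-padded input over the LRN window: across the
        // channel axis for CHANNEL_NRM, over a size x size spatial patch for
        // SPATIAL_NRM. The RDom starts at (1 - size)/2 so the window is centred.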
        Halide::Expr base;
        if (type == CHANNEL_NRM)
        {
            Halide::RDom r((1 - size) / 2, size);
            base = alphaSize * sum(padded_sq(x, y, c + r, n));
        }
        else  // SPATIAL_NRM
        {
            Halide::RDom r((1 - size) / 2, size, (1 - size) / 2, size);
            base = alphaSize * sum(padded_sq(x + r.x, y + r.y, c, n));
        }
        base += static_cast<float>(bias);
        top(x, y, c, n) = inputBuffer(x, y, c, n) / pow(base, beta);
        return Ptr<BackendNode>(new HalideBackendNode({ padded_sq, top }));
#endif  // HAVE_HALIDE
        return Ptr<BackendNode>();
    }

    virtual void applyHalideScheduler(Ptr<BackendNode>& node,
                                      const std::vector<Mat*> &inputs,
                                      const std::vector<Mat> &outputs,
                                      int targetId) const CV_OVERRIDE
    {
#ifdef HAVE_HALIDE
        if (targetId != DNN_TARGET_CPU)
        {
            Layer::applyHalideScheduler(node, inputs, outputs, targetId);
            return;
        }
        int outW, outH, outC, outN;
        getCanonicalSize(outputs[0].size, &outW, &outH, &outC, &outN);

        Halide::Var x("x"), y("y"), c("c"), n("n"), yo("yo"), yi("yi"), tile("tile");
        Halide::Func& top = node.dynamicCast<HalideBackendNode>()->funcs[1];
        Halide::Func& padded_sq = node.dynamicCast<HalideBackendNode>()->funcs[0];

        if (outW < 8 || outH <= 2)
            return;

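        // CPU schedule: split rows into pairs, fuse the outer row index with the
        // batch axis, parallelize over the fused tiles, unroll the row pair, and
        // vectorize 8-wide along x. padded_sq is stored per tile but computed per
        // row pair to keep its working set small.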
        top.reorder(x, c, y, n)
           .split(y, yo, yi, 2)
           .fuse(yo, n, tile)
           .parallel(tile)
           .unroll(yi)
           .vectorize(x, 8);
        padded_sq.store_at(top, tile)
                 .compute_at(top, yi);
#endif  // HAVE_HALIDE
    }

#ifdef HAVE_DNN_IE_NN_BUILDER_2019
    virtual Ptr<BackendNode> initInfEngine(const std::vector<Ptr<BackendWrapper> >&) CV_OVERRIDE
    {
        float alphaSize = alpha;
        if (!normBySize)
            alphaSize *= (type == SPATIAL_NRM ? size*size : size);

        InferenceEngine::Builder::NormLayer ieLayer(name);
        ieLayer.setSize(size);
        ieLayer.setAlpha(alphaSize);
        ieLayer.setBeta(beta);
        ieLayer.setAcrossMaps(type == CHANNEL_NRM);

        InferenceEngine::Builder::Layer l = ieLayer;
        l.getParameters()["k"] = bias;
        return Ptr<BackendNode>(new InfEngineBackendNode(l));
    }
#endif  // HAVE_DNN_IE_NN_BUILDER_2019

#ifdef HAVE_DNN_NGRAPH
    virtual Ptr<BackendNode> initNgraph(const std::vector<Ptr<BackendWrapper> >& inputs, const std::vector<Ptr<BackendNode> >& nodes) CV_OVERRIDE
    {
        float alphaSize = alpha;
        if (!normBySize)
            alphaSize *= (type == SPATIAL_NRM ? size*size : size);

        auto& ieInpNode = nodes[0].dynamicCast<InfEngineNgraphNode>()->node;
        std::vector<int64_t> axes;
        if (type != SPATIAL_NRM) {
            axes = {1};
        } else {
            axes.resize(ieInpNode->get_shape().size() - 2);
            std::iota(axes.begin(), axes.end(), 2);
        }
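        // nGraph's LRN reduces over the listed axes: the channel axis (1) for
        // ACROSS_CHANNELS, all spatial axes (2..N-1) for WITHIN_CHANNEL.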
        auto ngraph_axes = std::make_shared<ngraph::op::Constant>(ngraph::element::i64, ngraph::Shape{axes.size()}, axes.data());
        auto lrn = std::make_shared<ngraph::op::LRN>(ieInpNode, ngraph_axes, alphaSize, beta, bias, size);
        return Ptr<BackendNode>(new InfEngineNgraphNode(lrn));
    }
#endif  // HAVE_DNN_NGRAPH

    virtual int64 getFLOPS(const std::vector<MatShape> &inputs,
                           const std::vector<MatShape> &outputs) const CV_OVERRIDE
    {
        CV_UNUSED(outputs);  // suppress unused variable warning
        CV_Assert(inputs.size() > 0);
        int64 flops = 0;

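        // Rough estimate: the initial window sum plus a constant number of
        // operations (incremental update, log, scale, exp) per output element
        // for ACROSS_CHANNELS; a full size*size window sum plus scale/bias and
        // power per element for WITHIN_CHANNEL.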
        for (size_t i = 0; i < inputs.size(); i++)
        {
            if (type == CHANNEL_NRM)
            {
                int channels = inputs[i][1];
                int ksize = (size - 1) / 2;

                flops += inputs[i][0]*(std::min(ksize, channels)*2*total(inputs[i], 2) + channels*4*total(inputs[i], 2));

                if (ksize < channels)
                {
                    flops += (size + 2*(channels - size))*total(inputs[i], 2);
                }
            }
            else
            {
                flops += total(inputs[i])*(2*size*size + 2);
            }
        }
        return flops;
    }

private:
    enum Type
    {
        CHANNEL_NRM,
        SPATIAL_NRM
    };
};

Ptr<LRNLayer> LRNLayer::create(const LayerParams& params)
{
    return Ptr<LRNLayer>(new LRNLayerImpl(params));
}

}  // namespace dnn
}  // namespace cv