// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#include "lrn.h"

#include <math.h>

namespace ncnn {

LRN::LRN()
{
    one_blob_only = true;
    support_inplace = true;
}

int LRN::load_param(const ParamDict& pd)
{
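    // param ids and defaults: 0=region_type (0), 1=local_size (5), 2=alpha (1.f), 3=beta (0.75f), 4=bias (1.f)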
    region_type = pd.get(0, 0);
    local_size = pd.get(1, 5);
    alpha = pd.get(2, 1.f);
    beta = pd.get(3, 0.75f);
    bias = pd.get(4, 1.f);

    return 0;
}

int LRN::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
    int w = bottom_top_blob.w;
    int h = bottom_top_blob.h;
    int channels = bottom_top_blob.c;
    size_t elemsize = bottom_top_blob.elemsize;
    int size = w * h;

    // per-element squared values (padded with a local_size border later for the within-channel case)
    Mat square_blob;
    square_blob.create(w, h, channels, elemsize, opt.workspace_allocator);
    if (square_blob.empty())
        return -100;
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < channels; q++)
    {
        const float* ptr = bottom_top_blob.channel(q);
        float* outptr = square_blob.channel(q);

        for (int i = 0; i < size; i++)
        {
            outptr[i] = ptr[i] * ptr[i];
        }
    }

    if (region_type == NormRegion_ACROSS_CHANNELS)
    {
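        // out = in * (bias + alpha / local_size * sum(in^2 over local_size neighboring channels))^(-beta)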
        Mat square_sum;
        square_sum.create(w, h, channels, elemsize, opt.workspace_allocator);
        if (square_sum.empty())
            return -100;
        square_sum.fill(0.f);

        const float alpha_div_size = alpha / local_size;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < channels; q++)
        {
            // square sum
            float* ssptr = square_sum.channel(q);
            for (int p = q - local_size / 2; p <= q + local_size / 2; p++)
            {
                if (p < 0 || p >= channels)
                    continue;

                const float* sptr = square_blob.channel(p);
                for (int i = 0; i < size; i++)
                {
                    ssptr[i] += sptr[i];
                }
            }

            float* ptr = bottom_top_blob.channel(q);
            for (int i = 0; i < size; i++)
            {
                ptr[i] = static_cast<float>(ptr[i] * pow(bias + alpha_div_size * ssptr[i], -beta));
            }
        }
    }
    else if (region_type == NormRegion_WITHIN_CHANNEL)
    {
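        // out = in * (bias + alpha / (local_size * local_size) * sum(in^2 over the local_size x local_size window))^(-beta)
        // the squared map is zero-padded so every output pixel sees a full window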
        int outw = w;
        int outh = h;

        Mat square_blob_bordered = square_blob;
        int pad = local_size / 2;
        if (pad > 0)
        {
            Option opt_b = opt;
            opt_b.blob_allocator = opt.workspace_allocator;
            copy_make_border(square_blob, square_blob_bordered, pad, local_size - pad - 1, pad, local_size - pad - 1, BORDER_CONSTANT, 0.f, opt_b);
            if (square_blob_bordered.empty())
                return -100;

            w = square_blob_bordered.w;
            h = square_blob_bordered.h;
        }

        const int maxk = local_size * local_size;

        const float alpha_div_size = alpha / maxk;

        // norm window offsets
        std::vector<int> _space_ofs(maxk);
        int* space_ofs = &_space_ofs[0];
        {
            int p1 = 0;
            int p2 = 0;
            int gap = w - local_size;
            for (int i = 0; i < local_size; i++)
            {
                for (int j = 0; j < local_size; j++)
                {
                    space_ofs[p1] = p2;
                    p1++;
                    p2++;
                }
                p2 += gap;
            }
        }

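        // for each output pixel, accumulate the squared values over the window and normalize in place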
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < channels; q++)
        {
            float* ptr = bottom_top_blob.channel(q);
            const Mat m = square_blob_bordered.channel(q);

            for (int i = 0; i < outh; i++)
            {
                for (int j = 0; j < outw; j++)
                {
                    const float* sptr = m.row(i) + j;

                    float ss = 0.f;

                    for (int k = 0; k < maxk; k++)
                    {
                        float val = sptr[space_ofs[k]];
                        ss += val;
                    }

                    ptr[j] = static_cast<float>(ptr[j] * pow(bias + alpha_div_size * ss, -beta));
                }

                ptr += outw;
            }
        }
    }

    return 0;
}

} // namespace ncnn