// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#include "bias.h"

namespace ncnn {

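// Bias adds a learned per-channel constant to its input. The layer consumes a
// single blob and supports in-place computation.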
Bias::Bias()
{
    one_blob_only = true;
    support_inplace = true;
}

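// Parameter id 0 holds the number of bias values to load (one per channel).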
int Bias::load_param(const ParamDict& pd)
{
    bias_data_size = pd.get(0, 0);

    return 0;
}

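// Read bias_data_size weight values from the model (type 1 = float32);
// an empty Mat signals a failed read, reported as error code -100.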
int Bias::load_model(const ModelBin& mb)
{
    bias_data = mb.load(bias_data_size, 1);
    if (bias_data.empty())
        return -100;

    return 0;
}

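// Add bias_data[q] to every element of channel q, operating on the blob in place.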
int Bias::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
    int w = bottom_top_blob.w;
    int h = bottom_top_blob.h;
    int channels = bottom_top_blob.c;
    int size = w * h;

    // Channels are independent, so distribute them across the thread pool.
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < channels; q++)
    {
        float* ptr = bottom_top_blob.channel(q);

        float bias = bias_data[q];

        // Apply the channel's single bias value to all w*h elements.
        for (int i = 0; i < size; i++)
        {
            ptr[i] += bias;
        }
    }

    return 0;
}

} // namespace ncnn