// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

convolution_bf16s(const Mat & bottom_blob,Mat & top_blob,const Mat & weight_data_bf16,const Mat & bias_data,int kernel_w,int kernel_h,int dilation_w,int dilation_h,int stride_w,int stride_h,int activation_type,const Mat & activation_params,const Option & opt)15 static void convolution_bf16s(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_bf16, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
16 {
17 int w = bottom_blob.w;
18 int channels = bottom_blob.c;
19
20 int outw = top_blob.w;
21 int outh = top_blob.h;
22 int outch = top_blob.c;
23
24 const int maxk = kernel_w * kernel_h;
25
26 // kernel offsets
27 std::vector<int> _space_ofs(maxk);
28 int* space_ofs = &_space_ofs[0];
29 {
30 int p1 = 0;
31 int p2 = 0;
32 int gap = w * dilation_h - kernel_w * dilation_w;
33 for (int i = 0; i < kernel_h; i++)
34 {
35 for (int j = 0; j < kernel_w; j++)
36 {
37 space_ofs[p1] = p2;
38 p1++;
39 p2 += dilation_w;
40 }
41 p2 += gap;
42 }
43 }
44
45 const float* bias_data_ptr = bias_data;
46
47 // num_output
48 #pragma omp parallel for num_threads(opt.num_threads)
49 for (int p = 0; p < outch; p++)
50 {
51 unsigned short* outptr = top_blob.channel(p);
52
53 for (int i = 0; i < outh; i++)
54 {
55 for (int j = 0; j < outw; j++)
56 {
57 float sum = 0.f;
58
59 if (bias_data_ptr)
60 {
61 sum = bias_data_ptr[p];
62 }
63
64 const unsigned short* kptr = (const unsigned short*)weight_data_bf16 + maxk * channels * p;
65
66 // channels
67 for (int q = 0; q < channels; q++)
68 {
69 const Mat m = bottom_blob.channel(q);
70 const unsigned short* sptr = m.row<unsigned short>(i * stride_h) + j * stride_w;
71
72 for (int k = 0; k < maxk; k++)
73 {
74 float val = bfloat16_to_float32(sptr[space_ofs[k]]);
75 float wt = bfloat16_to_float32(kptr[k]);
76 sum += val * wt;
77 }
78
79 kptr += maxk;
80 }
81
82 sum = activation_ss(sum, activation_type, activation_params);
83
84 outptr[j] = float32_to_bfloat16(sum);
85 }
86
87 outptr += outw;
88 }
89 }
90 }