1 // Tencent is pleased to support the open source community by making ncnn available.
2 //
3 // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
4 //
5 // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
6 // in compliance with the License. You may obtain a copy of the License at
7 //
8 // https://opensource.org/licenses/BSD-3-Clause
9 //
10 // Unless required by applicable law or agreed to in writing, software distributed
11 // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
12 // CONDITIONS OF ANY KIND, either express or implied. See the License for the
13 // specific language governing permissions and limitations under the License.
14
// Naive (non-im2col) convolution with fp16 storage and fp16 arithmetic.
// Input is packed 4 elements per channel slot, output is packed 8 elements
// per channel slot; each kernel tap therefore carries a 4x8 weight tile
// (4 input lanes -> 8 output lanes, 32 fp16 values, see kptr += 32).
//
// bottom_blob        : input feature map, pack4 fp16
// top_blob           : pre-allocated output feature map, pack8 fp16 (written here)
// weight_data_fp16   : weights, one channel per output pack of 8; layout is
//                      maxk * channels consecutive 4x8 tiles per output channel
// bias_data_fp16     : optional bias, 8 fp16 values per output channel (may be empty)
// kernel/dilation/stride parameters and activation_* follow the usual ncnn
// convolution convention; opt supplies the OpenMP thread count.
static void convolution_pack4to8_fp16sa_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data_fp16, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets: for each of the maxk taps, the element offset (in units
    // of one pack of 4) from the top-left of the sliding window, accounting
    // for dilation within a row and the row-to-row gap
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    const __fp16* bias_data_ptr = bias_data_fp16;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        __fp16* outptr = top_blob.channel(p);

        // hoisted out of the spatial loops: the bias vector (or zero) is
        // invariant for the whole output channel p
        float16x8_t _bias = bias_data_ptr ? vld1q_f16(bias_data_ptr + p * 8) : vdupq_n_f16((__fp16)0.f);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                float16x8_t _sum = _bias;

                const __fp16* kptr = weight_data_fp16.channel(p);

                // accumulate over input channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    // window origin for this output pixel (pack4 => *4 elements)
                    const __fp16* sptr = m.row<const __fp16>(i * stride_h) + j * stride_w * 4;

                    for (int k = 0; k < maxk; k++)
                    {
                        // 4 input lanes of channel q at tap k
                        float16x4_t _val = vld1_f16(sptr + space_ofs[k] * 4);

                        // 4x8 weight tile: one 8-wide weight vector per input lane
                        float16x8_t _w0 = vld1q_f16(kptr);
                        float16x8_t _w1 = vld1q_f16(kptr + 8);
                        float16x8_t _w2 = vld1q_f16(kptr + 16);
                        float16x8_t _w3 = vld1q_f16(kptr + 24);

                        // _sum += _wN * _val[N] for each of the 4 input lanes
                        _sum = vfmaq_lane_f16(_sum, _w0, _val, 0);
                        _sum = vfmaq_lane_f16(_sum, _w1, _val, 1);
                        _sum = vfmaq_lane_f16(_sum, _w2, _val, 2);
                        _sum = vfmaq_lane_f16(_sum, _w3, _val, 3);

                        kptr += 32;
                    }
                }

                _sum = activation_ps(_sum, activation_type, activation_params);

                vst1q_f16(outptr + j * 8, _sum);
            }

            outptr += outw * 8;
        }
    }
}
99