// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

im2col_sgemm_pack1ton_fp16sa_rvv(const Mat & bottom_im2col,Mat & top_blob,const Mat & kernel,const Mat & _bias,const Option & opt)15 static void im2col_sgemm_pack1ton_fp16sa_rvv(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt)
16 {
17 const int packn = csrr_vlenb() / 2;
18 const word_type vl = vsetvl_e16m1(packn);
19
20 // Mat bottom_im2col(size, maxk, inch, 2u, 1, opt.workspace_allocator);
21
22 const int size = bottom_im2col.w;
23 const int maxk = bottom_im2col.h;
24 const int inch = bottom_im2col.c;
25
26 const int outch = top_blob.c;
27
28 const __fp16* bias = _bias;
29
30 // permute
31 Mat tmp;
32 tmp.create(maxk, inch, size, 2u, 1, opt.workspace_allocator);
33 {
34 #pragma omp parallel for num_threads(opt.num_threads)
35 for (int i = 0; i < size; i++)
36 {
37 __fp16* tmpptr = tmp.channel(i);
38
39 for (int q = 0; q < inch; q++)
40 {
41 const __fp16* img0 = (const __fp16*)bottom_im2col.channel(q) + i;
42
43 for (int k = 0; k < maxk; k++)
44 {
45 tmpptr[0] = img0[0];
46 img0 += size;
47 tmpptr += 1;
48 }
49 }
50 }
51 }
52
53 #pragma omp parallel for num_threads(opt.num_threads)
54 for (int p = 0; p < outch; p++)
55 {
56 __fp16* outptr0 = top_blob.channel(p);
57
58 int i = 0;
59 for (; i < size; i++)
60 {
61 const __fp16* tmpptr = tmp.channel(i);
62 const __fp16* kptr0 = kernel.channel(p);
63
64 int nn = inch * maxk; // inch always > 0
65
66 vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl);
67
68 if (bias)
69 {
70 _sum = vle16_v_f16m1(bias + p * packn, vl);
71 }
72
73 for (int j = 0; j < nn; j++)
74 {
75 __fp16 val = *tmpptr++;
76 vfloat16m1_t _w0 = vle16_v_f16m1(kptr0, vl);
77 _sum = vfmacc_vf_f16m1(_sum, val, _w0, vl);
78
79 kptr0 += packn;
80 }
81
82 vse16_v_f16m1(outptr0, _sum, vl);
83
84 outptr0 += packn;
85 }
86 }
87 }
88
convolution_im2col_sgemm_pack1ton_fp16sa_rvv(const Mat & bottom_blob,Mat & top_blob,const Mat & kernel,const Mat & _bias,int kernel_w,int kernel_h,int dilation_w,int dilation_h,int stride_w,int stride_h,const Option & opt)89 static void convolution_im2col_sgemm_pack1ton_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
90 {
91 int w = bottom_blob.w;
92 int inch = bottom_blob.c;
93
94 int outw = top_blob.w;
95 int outh = top_blob.h;
96 const int size = outw * outh;
97
98 const int maxk = kernel_w * kernel_h;
99
100 // im2col
101 Mat bottom_im2col(size, maxk, inch, 2u, 1, opt.workspace_allocator);
102 {
103 const int gap = w * stride_h - outw * stride_w;
104
105 #pragma omp parallel for num_threads(opt.num_threads)
106 for (int p = 0; p < inch; p++)
107 {
108 const Mat img = bottom_blob.channel(p);
109 __fp16* ptr = bottom_im2col.channel(p);
110
111 for (int u = 0; u < kernel_h; u++)
112 {
113 for (int v = 0; v < kernel_w; v++)
114 {
115 const __fp16* sptr = img.row<const __fp16>(dilation_h * u) + dilation_w * v;
116
117 for (int i = 0; i < outh; i++)
118 {
119 int j = 0;
120 for (; j < outw; j++)
121 {
122 ptr[0] = sptr[0];
123
124 sptr += stride_w;
125 ptr += 1;
126 }
127
128 sptr += gap;
129 }
130 }
131 }
132 }
133 }
134
135 im2col_sgemm_pack1ton_fp16sa_rvv(bottom_im2col, top_blob, kernel, _bias, opt);
136 }
137