// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

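// convolve a pack8 int8 input blob into a pack1 int32 output blob,
// accumulating a plain dot product over every input channel and kernel tap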
static void convolution_pack8to1_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_int8, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        int* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                int sum = 0;

                const signed char* kptr = weight_data_int8.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    const signed char* sptr = m.row<const signed char>(i * stride_h) + j * stride_w * 8;

                    for (int k = 0; k < maxk; k++)
                    {
                        // TODO use _mm_cvtepi8_epi16 on sse4.1
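                        // SSE2 sign extension: _mm_cmpgt_epi8 against zero yields 0xFF for
                        // negative bytes, and interleaving it as the high byte widens int8 to int16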
                        __m128i _val = _mm_loadl_epi64((const __m128i*)(sptr + space_ofs[k] * 8));
                        _val = _mm_unpacklo_epi8(_val, _mm_cmpgt_epi8(_mm_setzero_si128(), _val));

                        __m128i _w = _mm_loadl_epi64((const __m128i*)kptr);
                        _w = _mm_unpacklo_epi8(_w, _mm_cmpgt_epi8(_mm_setzero_si128(), _w));

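                        // widening multiply: mullo/mulhi give the low/high 16 bits of each product,
                        // and the unpacks interleave them into 8 full int32 products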
                        __m128i _sl = _mm_mullo_epi16(_val, _w);
                        __m128i _sh = _mm_mulhi_epi16(_val, _w);
                        __m128i _s0 = _mm_unpacklo_epi16(_sl, _sh);
                        __m128i _s1 = _mm_unpackhi_epi16(_sl, _sh);

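                        // pairwise add collapses the 8 products into 4 int32 partial sums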
                        __m128i _s4 = _mm_add_epi32(_s0, _s1);

                        // TODO use _mm_hadd_epi32 on ssse3
                        int s4[4];
                        _mm_storeu_si128((__m128i*)s4, _s4);
                        sum += s4[0] + s4[1] + s4[2] + s4[3]; // dot

                        kptr += 8;
                    }
                }

                outptr[j] = sum;
            }

            outptr += outw;
        }
    }
}