// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

#include "swish_x86.h"

#include "x86_activation.h"

#include <math.h>

namespace ncnn {

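// Packed layouts are supported when SSE2 is available: ncnn may then hand this
// layer blobs whose elements pack 4 (SSE) or 8 (AVX) floats each.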
Swish_x86::Swish_x86()
{
#if __SSE2__
    support_packing = true;
#endif // __SSE2__
}

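// Apply swish(x) = x * sigmoid(x) = x / (1 + exp(-x)) in place over every
// channel of the blob; the work is split across threads one channel at a time.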
int Swish_x86::forward_inplace(Mat& bottom_top_blob, const Option& opt) const
{
    int w = bottom_top_blob.w;
    int h = bottom_top_blob.h;
    int channels = bottom_top_blob.c;
    int size = w * h;
#if __SSE2__
    int elempack = bottom_top_blob.elempack;

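    // elempack=8: one AVX register covers a whole element, so swish is a
    // single load / multiply-by-sigmoid / store per element, using the
    // vectorized sigmoid_avx() helper from x86_activation.h.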
#if __AVX__
    if (elempack == 8)
    {
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < channels; q++)
        {
            float* ptr = bottom_top_blob.channel(q);

            for (int i = 0; i < size; i++)
            {
                __m256 _p = _mm256_loadu_ps(ptr);
                _mm256_storeu_ps(ptr, _mm256_mul_ps(_p, sigmoid_avx(_p)));
                ptr += 8;
            }
        }

        return 0;
    }
#endif // __AVX__

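    // elempack=4: same idea with one SSE register per element, via the
    // sigmoid_sse() helper.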
    if (elempack == 4)
    {
        #pragma omp parallel for num_threads(opt.num_threads)
        for (int q = 0; q < channels; q++)
        {
            float* ptr = bottom_top_blob.channel(q);

            for (int i = 0; i < size; i++)
            {
                __m128 _p = _mm_loadu_ps(ptr);
                _mm_storeu_ps(ptr, _mm_mul_ps(_p, sigmoid_sse(_p)));
                ptr += 4;
            }
        }

        return 0;
    }
#endif // __SSE2__

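    // elempack=1 fallback: walk each flat channel in 8-wide then 4-wide SIMD
    // chunks where the ISA allows, then finish the remaining 0-3 floats with
    // scalar math.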
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < channels; q++)
    {
        float* ptr = bottom_top_blob.channel(q);

        int i = 0;
#if __SSE2__
#if __AVX__
        for (; i + 7 < size; i += 8)
        {
            __m256 _p = _mm256_loadu_ps(ptr);
            _mm256_storeu_ps(ptr, _mm256_mul_ps(_p, sigmoid_avx(_p)));
            ptr += 8;
        }
#endif // __AVX__
        for (; i + 3 < size; i += 4)
        {
            __m128 _p = _mm_loadu_ps(ptr);
            _mm_storeu_ps(ptr, _mm_mul_ps(_p, sigmoid_sse(_p)));
            ptr += 4;
        }
#endif // __SSE2__
        for (; i < size; i++)
        {
            // expf keeps the computation in single precision instead of
            // promoting to double through exp().
            *ptr = *ptr / (1.f + expf(-*ptr));
            ptr++;
        }
    }

    return 0;
}

} // namespace ncnn