#include "gemm.h"
#include "utils.h"
#include "im2col.h"
#include "dark_cuda.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <float.h>
#include <string.h>
#include <stdint.h>
#include <time.h>
#ifdef _WIN32
#include <intrin.h>
#endif
#if defined(_OPENMP)
#include <omp.h>
#endif

#define TILE_M 4    // 4 ops
#define TILE_N 16   // AVX2 = 2 ops * 8 floats
#define TILE_K 16   // loop
#ifdef __cplusplus
#define PUT_IN_REGISTER
#else
#define PUT_IN_REGISTER register
#endif
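
// Note: `register` is only a hint in C and was removed entirely in C++17,
// so PUT_IN_REGISTER expands to nothing when this file is built as C++.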

void gemm_bin(int M, int N, int K, float ALPHA,
        char *A, int lda,
        float *B, int ldb,
        float *C, int ldc)
{
    int i,j,k;
    for(i = 0; i < M; ++i){
        for(k = 0; k < K; ++k){
            char A_PART = A[i*lda+k];
            if(A_PART){
                for(j = 0; j < N; ++j){
                    C[i*ldc+j] += B[k*ldb+j];
                }
            } else {
                for(j = 0; j < N; ++j){
                    C[i*ldc+j] -= B[k*ldb+j];
                }
            }
        }
    }
}

float *random_matrix(int rows, int cols)
{
    int i;
    float* m = (float*)xcalloc(rows * cols, sizeof(float));
    for(i = 0; i < rows*cols; ++i){
        m[i] = (float)rand()/RAND_MAX;
    }
    return m;
}

void time_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);
    int i;
    clock_t start = clock(), end;
    for(i = 0; i<10; ++i){
        gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    }
    end = clock();
    // clock() ticks divided by CLOCKS_PER_SEC are seconds, not milliseconds
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf sec\n",m,k,k,n, TA, TB, (double)(end-start)/CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}


void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
        float *A, int lda,
        float *B, int ldb,
        float BETA,
        float *C, int ldc)
{
    gemm_cpu( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc);
}


//--------------------------------------------
// XNOR bitwise GEMM for binary neural network
//--------------------------------------------


static inline unsigned char xnor(unsigned char a, unsigned char b) {
    //return a == b;
    return !(a^b);
}

// INT-32
static inline uint32_t get_bit_int32(uint32_t const*const src, size_t index) {
    size_t src_i = index / 32;
    int src_shift = index % 32;
    unsigned char val = (src[src_i] & (1u << src_shift)) != 0; // 1u: a signed (1 << 31) would overflow and fail the old "> 0" test
    return val;
}

static inline uint32_t xnor_int32(uint32_t a, uint32_t b) {
    return ~(a^b);
}

static inline uint64_t xnor_int64(uint64_t a, uint64_t b) {
    return ~(a^b);
}


static inline uint32_t fill_bit_int32(char src) {
    if (src == 0) return 0x00000000;
    else return 0xFFFFFFFF;
}

static inline uint64_t fill_bit_int64(char src) {
    if (src == 0) return 0x0000000000000000;
    else return 0xFFFFFFFFFFFFFFFF;
}

void binary_int32_printf(uint32_t src) {
    int i;
    for (i = 0; i < 32; ++i) {
        if (src & 1) printf("1");
        else printf("0");
        src = src >> 1;
    }
    printf("\n");
}

void binary_int64_printf(uint64_t src) {
    int i;
    for (i = 0; i < 64; ++i) {
        if (src & 1) printf("1");
        else printf("0");
        src = src >> 1;
    }
    printf("\n");
}
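
// Background for the kernels below: a binary layer stores each weight/activation
// as one bit b encoding the value 2*b - 1 (0 -> -1, 1 -> +1). For two K-bit
// vectors the dot product is matches - mismatches = 2*popcount(xnor(a,b)) - K,
// which is why every kernel in this file computes C = (2 * count - K) * mean.
// A minimal single-word sketch of that identity (illustration only; nothing
// below calls it):
static inline int xnor_dot_demo(uint64_t a, uint64_t b, int K) {
    uint64_t mask = (K >= 64) ? ~(uint64_t)0 : (((uint64_t)1 << K) - 1); // keep only the K valid bits
#ifdef _MSC_VER
    int count = (int)__popcnt64(~(a ^ b) & mask);   // number of matching bit positions
#else
    int count = __builtin_popcountll(~(a ^ b) & mask);  // number of matching bit positions
#endif
    return 2 * count - K;   // dot product of the two {-1,+1} vectors
}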

/*
void gemm_nn_custom_bin_mean(int M, int N, int K, float ALPHA_UNUSED,
    unsigned char *A, int lda,
    unsigned char *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int *count_arr = xcalloc(M*N, sizeof(int));

    int i, j, k;
    for (i = 0; i < M; ++i) {   // l.n - filters [16 - 55 - 1024]
        for (k = 0; k < K; ++k) {   // l.size*l.size*l.c - one filter size [27 - 9216]
            char a_bit = get_bit(A, i*lda + k);

            for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
                char b_bit = get_bit(B, k*ldb + j);
                count_arr[i*ldc + j] += xnor(a_bit, b_bit);
            }
        }
    }

    for (i = 0; i < M; ++i) {
        float mean_val = mean_arr[i];
        for (j = 0; j < N; ++j) {
            C[i*ldc + j] = (2 * count_arr[i*ldc + j] - K) * mean_val;
        }
    }
    free(count_arr);
}
*/

/*
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
    unsigned char *A, int lda,
    unsigned char *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int *count_arr = xcalloc(M*N, sizeof(int));

    int i, j, k;
    for (i = 0; i < M; ++i) {   // l.n - filters [16 - 55 - 1024]
        for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
            for (k = 0; k < K; ++k) {   // l.size*l.size*l.c - one filter size [27 - 9216]
                char a_bit = get_bit(A, i*lda + k);
                char b_bit = get_bit(B, j*ldb + k);
                count_arr[i*ldc + j] += xnor(a_bit, b_bit);
            }
        }
    }

    for (i = 0; i < M; ++i) {
        float mean_val = mean_arr[i];
        for (j = 0; j < N; ++j) {
            C[i*ldc + j] = (2 * count_arr[i*ldc + j] - K) * mean_val;
        }
    }
    free(count_arr);
}
*/

/*
void gemm_nn_custom_bin_mean(int M, int N, int K, float ALPHA_UNUSED,
    unsigned char *A, int lda,
    unsigned char *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int *count_arr = xcalloc(M*N, sizeof(int));

    int i;

    #pragma omp parallel for
    for (i = 0; i < M; ++i) {   // l.n - filters [16 - 55 - 1024]
        int j, k, h;
        for (k = 0; k < K; ++k) {   // l.size*l.size*l.c - one filter size [27 - 9216]
            const char a_bit = get_bit(A, i*lda + k);
            uint64_t a_bit64 = fill_bit_int64(a_bit);
            int k_ldb = k*ldb;

            for (j = 0; j < N; j += 64) { // out_h*out_w - one channel output size [169 - 173056]
                if ((N - j > 64) && (k_ldb % 8 == 0)) {
                    uint64_t b_bit64 = *((uint64_t *)(B + (k_ldb + j) / 8));
                    uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);
                    //printf("\n %d \n", __builtin_popcountll(c_bit64)); // gcc
                    printf("\n %d \n", __popcnt64(c_bit64));    // msvs

                    int h;
                    for (h = 0; h < 64; ++h)
                        if ((c_bit64 >> h) & 1) count_arr[i*ldc + j + h] += 1;

                    //binary_int64_printf(a_bit64);
                    //binary_int64_printf(b_bit64);
                    //binary_int64_printf(c_bit64);
                }
                else {
                    for (; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
                        char b_bit = get_bit(B, k_ldb + j);
                        if (xnor(a_bit, b_bit)) count_arr[i*ldc + j] += 1;
                    }
                }

            }
        }
    }

    if (mean_arr) {
        //int K_2 = K / 2;
        for (i = 0; i < M; ++i) {
            float mean_val = mean_arr[i];
            //float mean_val2 = 2 * mean_val;
            for (j = 0; j < N; ++j) {
                C[i*ldc + j] = (2 * count_arr[i*ldc + j] - K) * mean_val;
                //C[i*ldc + j] = (count_arr[i*ldc + j] - K_2) *mean_val2;
            }
        }
    }
    else {
        for (i = 0; i < M; ++i) {
            for (j = 0; j < N; ++j) {
                C[i*ldc + j] = count_arr[i*ldc + j] - K / 2;
            }
        }
    }

    free(count_arr);

    //getchar();
}
*/


/*
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
    unsigned char *A, int lda,
    unsigned char *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int i;

    #pragma omp parallel for
    for (i = 0; i < M; ++i) {   // l.n - filters [16 - 55 - 1024]
        int j, k, h;
        float mean_val = mean_arr[i];

        for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
            int count = 0;

            for (k = 0; k < K; k += 64) {   // l.size*l.size*l.c - one filter size [27 - 9216]
                uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8));
                uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8));
                uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);

#ifdef WIN32
                int tmp_count = __popcnt64(c_bit64);
#else
                int tmp_count = __builtin_popcountll(c_bit64);
#endif

                if (K - k < 64) tmp_count = tmp_count - (64 - (K - k)); // remove extra bits
                count += tmp_count;
                //binary_int64_printf(c_bit64);
                //printf(", count = %d \n\n", tmp_count);
            }

            C[i*ldc + j] = (2 * count - K) * mean_val;
        }
    }
}
*/

//----------------------------

// is not used
/*
void transpose_32x32_bits_my(uint32_t *A, uint32_t *B, int lda, int ldb)
{
    unsigned int x, y;
    for (y = 0; y < 32; ++y) {
        for (x = 0; x < 32; ++x) {
            if (A[y * lda] & ((uint32_t)1 << x)) B[x * ldb] |= (uint32_t)1 << y;
        }
    }
}
*/

#ifndef GPU
uint8_t reverse_8_bit(uint8_t a) {
    return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16;
}

uint32_t reverse_32_bit(uint32_t a)
{
    // unsigned int __rbit(unsigned int val) // for ARM    //__asm__("rbit %0, %1\n" : "=r"(output) : "r"(input));
    return (reverse_8_bit(a >> 24) << 0) |
        (reverse_8_bit(a >> 16) << 8) |
        (reverse_8_bit(a >> 8) << 16) |
        (reverse_8_bit(a >> 0) << 24);
}

#define swap(a0, a1, j, m) t = ((a0) ^ ((a1) >> (j))) & (m); (a0) = (a0) ^ t; (a1) = (a1) ^ (t << (j));

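// transpose32_optimized() is the classic Hacker's Delight bit-matrix transpose:
// five swap passes exchange 16x16, 8x8, 4x4, 2x2 and 1x1 sub-blocks of the
// 32x32 bit matrix, and the final loop reverses row order and bit order, which
// in effect mirrors the transpose along the other diagonal (hence the
// "reversed_diagonale" in the caller's name).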
void transpose32_optimized(uint32_t A[32]) {
    int j, k;
    unsigned m, t;

    //m = 0x0000FFFF;
    //for (j = 16; j != 0; j = j >> 1, m = m ^ (m << j)) {
    //    for (k = 0; k < 32; k = (k + j + 1) & ~j) {
    //        t = (A[k] ^ (A[k + j] >> j)) & m;
    //        A[k] = A[k] ^ t;
    //        A[k + j] = A[k + j] ^ (t << j);
    //    }
    //}

    j = 16;
    m = 0x0000FFFF;
    for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }

    j = 8;
    m = 0x00ff00ff;
    for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }

    j = 4;
    m = 0x0f0f0f0f;
    for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }

    j = 2;
    m = 0x33333333;
    for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }

    j = 1;
    m = 0x55555555;
    for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }

    // reverse Y
    for (j = 0; j < 16; ++j) {
        uint32_t tmp = A[j];
        A[j] = reverse_32_bit(A[31 - j]);
        A[31 - j] = reverse_32_bit(tmp);
    }
}

void transpose_32x32_bits_reversed_diagonale(uint32_t *A, uint32_t *B, int m, int n)
{
    unsigned A_tmp[32];
    int i;
    #pragma unroll
    for (i = 0; i < 32; ++i) A_tmp[i] = A[i * m];
    transpose32_optimized(A_tmp);
    #pragma unroll
    for (i = 0; i < 32; ++i) B[i*n] = A_tmp[i];
}


void transpose_8x8_bits_my(unsigned char *A, unsigned char *B, int lda, int ldb)
{
    unsigned x, y;
    for (y = 0; y < 8; ++y) {
        for (x = 0; x < 8; ++x) {
            if (A[y * lda] & (1 << x)) B[x * ldb] |= 1 << y;
        }
    }
}

unsigned char reverse_byte_1(char a)
{
    return ((a & 0x1) << 7) | ((a & 0x2) << 5) |
        ((a & 0x4) << 3) | ((a & 0x8) << 1) |
        ((a & 0x10) >> 1) | ((a & 0x20) >> 3) |
        ((a & 0x40) >> 5) | ((a & 0x80) >> 7);
}

unsigned char reverse_byte(unsigned char a)
{
    return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16;
}

static unsigned char lookup[16] = {
    0x0, 0x8, 0x4, 0xc, 0x2, 0xa, 0x6, 0xe,
    0x1, 0x9, 0x5, 0xd, 0x3, 0xb, 0x7, 0xf, };

unsigned char reverse_byte_3(unsigned char n) {
    // Reverse the top and bottom nibble then swap them.
    return (lookup[n & 0x0f] << 4) | lookup[n >> 4];
}


void transpose8rS32_reversed_diagonale(unsigned char* A, unsigned char* B, int m, int n)
{
    unsigned x, y, t;

    // Load the array and pack it into x and y.
    x = (A[0] << 24) | (A[m] << 16) | (A[2 * m] << 8) | A[3 * m];
    y = (A[4 * m] << 24) | (A[5 * m] << 16) | (A[6 * m] << 8) | A[7 * m];

    t = (x ^ (x >> 7)) & 0x00AA00AA;  x = x ^ t ^ (t << 7);
    t = (y ^ (y >> 7)) & 0x00AA00AA;  y = y ^ t ^ (t << 7);

    t = (x ^ (x >> 14)) & 0x0000CCCC;  x = x ^ t ^ (t << 14);
    t = (y ^ (y >> 14)) & 0x0000CCCC;  y = y ^ t ^ (t << 14);

    t = (x & 0xF0F0F0F0) | ((y >> 4) & 0x0F0F0F0F);
    y = ((x << 4) & 0xF0F0F0F0) | (y & 0x0F0F0F0F);
    x = t;

    B[7 * n] = reverse_byte(x >> 24);  B[6 * n] = reverse_byte(x >> 16);  B[5 * n] = reverse_byte(x >> 8);  B[4 * n] = reverse_byte(x);
    B[3 * n] = reverse_byte(y >> 24);  B[2 * n] = reverse_byte(y >> 16);  B[1 * n] = reverse_byte(y >> 8);  B[0 * n] = reverse_byte(y);
}

/*
// transpose by 8-bit
void transpose_bin(char *A, char *B, const int n, const int m,
    const int lda, const int ldb, const int block_size)
{
    //printf("\n n = %d, ldb = %d \t\t m = %d, lda = %d \n", n, ldb, m, lda);
    int i;
    #pragma omp parallel for
    for (i = 0; i < n; i += 8) {
        int j;
        for (j = 0; j < m; j += 8) {
            int a_index = i*lda + j;
            int b_index = j*ldb + i;
            //transpose_8x8_bits_my(&A[a_index/8], &B[b_index/8], lda/8, ldb/8);
            transpose8rS32_reversed_diagonale(&A[a_index / 8], &B[b_index / 8], lda / 8, ldb / 8);
        }
        for (; j < m; ++j) {
            if (get_bit(A, i*lda + j)) set_bit(B, j*ldb + i);
        }
    }
}
*/

#endif

// transpose by 32-bit
void transpose_bin(uint32_t *A, uint32_t *B, const int n, const int m,
    const int lda, const int ldb, const int block_size)
{
    //printf("\n n = %d (n mod 32 = %d), m = %d (m mod 32 = %d) \n", n, n % 32, m, m % 32);
    //printf("\n lda = %d (lda mod 32 = %d), ldb = %d (ldb mod 32 = %d) \n", lda, lda % 32, ldb, ldb % 32);
    int i;
    #pragma omp parallel for
    for (i = 0; i < n; i += 32) {
        int j;
        for (j = 0; j < m; j += 32) {
            int a_index = i*lda + j;
            int b_index = j*ldb + i;
            transpose_32x32_bits_reversed_diagonale(&A[a_index / 32], &B[b_index / 32], lda / 32, ldb / 32);
            //transpose_32x32_bits_my(&A[a_index/32], &B[b_index/32], lda/32, ldb/32);
        }
        for (; j < m; ++j) {
            if (get_bit((const unsigned char* const)A, i * lda + j)) set_bit((unsigned char* const)B, j * ldb + i);
        }
    }
}

static inline int popcnt_32(uint32_t val32) {
#ifdef WIN32    // Windows MSVS
    int tmp_count = __popcnt(val32);
#else   // Linux GCC
    int tmp_count = __builtin_popcount(val32);
#endif
    return tmp_count;
}
//----------------------------

#if (defined(__AVX__) && defined(__x86_64__)) || (defined(_WIN64) && !defined(__MINGW32__))

#if (defined(_WIN64) && !defined(__MINGW64__))
#include <intrin.h>
#include <ammintrin.h>
#include <immintrin.h>
#include <smmintrin.h>

#if defined(_MSC_VER) && _MSC_VER <= 1900
static inline __int64 _mm256_extract_epi64(__m256i a, const int index) {
    return a.m256i_i64[index];
}

static inline __int32 _mm256_extract_epi32(__m256i a, const int index) {
    return a.m256i_i32[index];
}
#endif

static inline float _castu32_f32(uint32_t a) {
    return *((float *)&a);
}

static inline float _mm256_extract_float32(__m256 a, const int index) {
    return a.m256_f32[index];
}

#else   // Linux GCC/Clang
#include <x86intrin.h>
#include <ammintrin.h>
#include <immintrin.h>
#include <smmintrin.h>
#include <cpuid.h>

static inline float _castu32_f32(uint32_t a) {
    return *((float *)&a);
}

static inline float _mm256_extract_float32(__m256 a, const int index) {
    switch (index) {
    case 0: return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 0));
    case 1: return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 1));
    case 2: return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 2));
    case 3: return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 3));
    case 4: return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 4));
    case 5: return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 5));
    case 6: return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 6));
    case 7: return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 7));
    default: return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 0));
    }
}

void asm_cpuid(uint32_t* abcd, uint32_t eax)
{
    uint32_t ebx = 0, edx = 0, ecx = 0;

    // EBX is saved to EDI and later restored
    __asm__("movl %%ebx, %%edi;"
        "cpuid;"
        "xchgl %%ebx, %%edi;"
        : "=D"(ebx),
        "+a"(eax), "+c"(ecx), "=d"(edx));

    abcd[0] = eax;
    abcd[1] = ebx;
    abcd[2] = ecx;
    abcd[3] = edx;
}
#endif



#ifdef _WIN32
//  Windows
#define cpuid(info, x)    __cpuidex(info, x, 0)
#else
//  GCC Intrinsics
void cpuid(int info[4], int InfoType) {
    __cpuid_count(InfoType, 0, info[0], info[1], info[2], info[3]);
}
#endif


//  Misc.
static int HW_MMX, HW_x64, HW_RDRAND, HW_BMI1, HW_BMI2, HW_ADX, HW_PREFETCHWT1;
static int HW_ABM;      // Advanced Bit Manipulation

//  SIMD: 128-bit
static int HW_SSE, HW_SSE2, HW_SSE3, HW_SSSE3, HW_SSE41, HW_SSE42, HW_SSE4a, HW_AES, HW_SHA;

//  SIMD: 256-bit
static int HW_AVX, HW_XOP, HW_FMA3, HW_FMA4, HW_AVX2;

//  SIMD: 512-bit
static int HW_AVX512F;      //  AVX512 Foundation
static int HW_AVX512CD;     //  AVX512 Conflict Detection
static int HW_AVX512PF;     //  AVX512 Prefetch
static int HW_AVX512ER;     //  AVX512 Exponential + Reciprocal
static int HW_AVX512VL;     //  AVX512 Vector Length Extensions
static int HW_AVX512BW;     //  AVX512 Byte + Word
static int HW_AVX512DQ;     //  AVX512 Doubleword + Quadword
static int HW_AVX512IFMA;   //  AVX512 Integer 52-bit Fused Multiply-Add
static int HW_AVX512VBMI;   //  AVX512 Vector Byte Manipulation Instructions

// https://stackoverflow.com/questions/6121792/how-to-check-if-a-cpu-supports-the-sse3-instruction-set
void check_cpu_features(void) {
    int info[4];
    cpuid(info, 0);
    int nIds = info[0];

    cpuid(info, 0x80000000);
    unsigned nExIds = info[0];

    //  Detect Features
    if (nIds >= 0x00000001) {
        cpuid(info, 0x00000001);
        HW_MMX = (info[3] & ((uint32_t)1 << 23)) != 0;
        HW_SSE = (info[3] & ((uint32_t)1 << 25)) != 0;
        HW_SSE2 = (info[3] & ((uint32_t)1 << 26)) != 0;
        HW_SSE3 = (info[2] & ((uint32_t)1 << 0)) != 0;

        HW_SSSE3 = (info[2] & ((uint32_t)1 << 9)) != 0;
        HW_SSE41 = (info[2] & ((uint32_t)1 << 19)) != 0;
        HW_SSE42 = (info[2] & ((uint32_t)1 << 20)) != 0;
        HW_AES = (info[2] & ((uint32_t)1 << 25)) != 0;

        HW_AVX = (info[2] & ((uint32_t)1 << 28)) != 0;
        HW_FMA3 = (info[2] & ((uint32_t)1 << 12)) != 0;

        HW_RDRAND = (info[2] & ((uint32_t)1 << 30)) != 0;
    }
    if (nIds >= 0x00000007) {
        cpuid(info, 0x00000007);
        HW_AVX2 = (info[1] & ((uint32_t)1 << 5)) != 0;

        HW_BMI1 = (info[1] & ((uint32_t)1 << 3)) != 0;
        HW_BMI2 = (info[1] & ((uint32_t)1 << 8)) != 0;
        HW_ADX = (info[1] & ((uint32_t)1 << 19)) != 0;
        HW_SHA = (info[1] & ((uint32_t)1 << 29)) != 0;
        HW_PREFETCHWT1 = (info[2] & ((uint32_t)1 << 0)) != 0;

        HW_AVX512F = (info[1] & ((uint32_t)1 << 16)) != 0;
        HW_AVX512CD = (info[1] & ((uint32_t)1 << 28)) != 0;
        HW_AVX512PF = (info[1] & ((uint32_t)1 << 26)) != 0;
        HW_AVX512ER = (info[1] & ((uint32_t)1 << 27)) != 0;
        HW_AVX512VL = (info[1] & ((uint32_t)1 << 31)) != 0;
        HW_AVX512BW = (info[1] & ((uint32_t)1 << 30)) != 0;
        HW_AVX512DQ = (info[1] & ((uint32_t)1 << 17)) != 0;
        HW_AVX512IFMA = (info[1] & ((uint32_t)1 << 21)) != 0;
        HW_AVX512VBMI = (info[2] & ((uint32_t)1 << 1)) != 0;
    }
    if (nExIds >= 0x80000001) {
        cpuid(info, 0x80000001);
        HW_x64 = (info[3] & ((uint32_t)1 << 29)) != 0;
        HW_ABM = (info[2] & ((uint32_t)1 << 5)) != 0;
        HW_SSE4a = (info[2] & ((uint32_t)1 << 6)) != 0;
        HW_FMA4 = (info[2] & ((uint32_t)1 << 16)) != 0;
        HW_XOP = (info[2] & ((uint32_t)1 << 11)) != 0;
    }
}

int is_avx() {
    static int result = -1;
    if (result == -1) {
        check_cpu_features();
        result = HW_AVX;
        if (result == 1) printf(" Used AVX \n");
        else printf(" Not used AVX \n");
    }
    return result;
}

int is_fma_avx2() {
    static int result = -1;
    if (result == -1) {
        check_cpu_features();
        result = HW_FMA3 && HW_AVX2;
        if (result == 1) printf(" Used FMA & AVX2 \n");
        else printf(" Not used FMA & AVX2 \n");
    }
    return result;
}

// https://software.intel.com/sites/landingpage/IntrinsicsGuide
void gemm_nn(int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float *C, int ldc)
{
    int i, j, k;
    if (is_avx() == 1) {    // AVX
        for (i = 0; i < M; ++i) {
            for (k = 0; k < K; ++k) {
                float A_PART = ALPHA*A[i*lda + k];
                __m256 a256, b256, c256, result256;    // AVX
                a256 = _mm256_set1_ps(A_PART);
                for (j = 0; j < N - 8; j += 8) {
                    b256 = _mm256_loadu_ps(&B[k*ldb + j]);
                    c256 = _mm256_loadu_ps(&C[i*ldc + j]);
                    // FMA - Intel Haswell (2013), AMD Piledriver (2012)
                    //result256 = _mm256_fmadd_ps(a256, b256, c256);
                    result256 = _mm256_mul_ps(a256, b256);
                    result256 = _mm256_add_ps(result256, c256);
                    _mm256_storeu_ps(&C[i*ldc + j], result256);
                }

                int prev_end = (N % 8 == 0) ? (N - 8) : (N / 8) * 8;
                for (j = prev_end; j < N; ++j)
                    C[i*ldc + j] += A_PART*B[k*ldb + j];
            }
        }
    }
    else {
        for (i = 0; i < M; ++i) {
            for (k = 0; k < K; ++k) {
                PUT_IN_REGISTER float A_PART = ALPHA * A[i * lda + k];
                for (j = 0; j < N; ++j) {
                    C[i*ldc + j] += A_PART*B[k*ldb + j];
                }
                /* // SSE
                __m128 a128, b128, c128, result128;    // SSE
                a128 = _mm_set1_ps(A_PART);
                for (j = 0; j < N - 4; j += 4) {
                    b128 = _mm_loadu_ps(&B[k*ldb + j]);
                    c128 = _mm_loadu_ps(&C[i*ldc + j]);
                    //result128 = _mm_fmadd_ps(a128, b128, c128);
                    result128 = _mm_mul_ps(a128, b128);
                    result128 = _mm_add_ps(result128, c128);
                    _mm_storeu_ps(&C[i*ldc + j], result128);
                }

                int prev_end = (N % 4 == 0) ? (N - 4) : (N / 4) * 4;
                for (j = prev_end; j < N; ++j){
                    C[i*ldc + j] += A_PART*B[k*ldb + j];
                }
                */
            }
        }
    }
}


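// gemm_nn_fast() below tiles C into TILE_M x TILE_N = 4x16 blocks: eight AVX
// accumulators (4 rows x two 8-float vectors) stay live across the TILE_K
// inner loop, so every B vector loaded from memory is reused by 4 rows of A.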
void gemm_nn_fast(int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float *C, int ldc)
{
    int i;

    #pragma omp parallel for
    for (i = 0; i < (M / TILE_M)*TILE_M; i += TILE_M)
    {
        int j, k;
        int i_d, k_d;

        for (k = 0; k < (K / TILE_K)*TILE_K; k += TILE_K)
        {
            for (j = 0; j < (N / TILE_N)*TILE_N; j += TILE_N)
            {
                // L1 - 6 bits tag [11:6] - cache size 32 KB, conflict for each 4 KB
                // L2 - 9 bits tag [14:6] - cache size 256 KB, conflict for each 32 KB
                // L3 - 13 bits tag [18:6] - cache size 8 MB, conflict for each 512 KB

                __m256 result256;
                __m256 a256_0, b256_0;    // AVX
                __m256 a256_1, b256_1;    // AVX
                __m256 a256_2;// , b256_2;    // AVX
                __m256 a256_3;// , b256_3;    // AVX
                __m256 c256_0, c256_1, c256_2, c256_3;
                __m256 c256_4, c256_5, c256_6, c256_7;

                c256_0 = _mm256_loadu_ps(&C[(0 + i)*ldc + (0 + j)]);
                c256_1 = _mm256_loadu_ps(&C[(1 + i)*ldc + (0 + j)]);
                c256_2 = _mm256_loadu_ps(&C[(0 + i)*ldc + (8 + j)]);
                c256_3 = _mm256_loadu_ps(&C[(1 + i)*ldc + (8 + j)]);

                c256_4 = _mm256_loadu_ps(&C[(2 + i)*ldc + (0 + j)]);
                c256_5 = _mm256_loadu_ps(&C[(3 + i)*ldc + (0 + j)]);
                c256_6 = _mm256_loadu_ps(&C[(2 + i)*ldc + (8 + j)]);
                c256_7 = _mm256_loadu_ps(&C[(3 + i)*ldc + (8 + j)]);


                for (k_d = 0; k_d < (TILE_K); ++k_d)
                {
                    a256_0 = _mm256_set1_ps(ALPHA*A[(0 + i)*lda + (k_d + k)]);
                    a256_1 = _mm256_set1_ps(ALPHA*A[(1 + i)*lda + (k_d + k)]);

                    a256_2 = _mm256_set1_ps(ALPHA*A[(2 + i)*lda + (k_d + k)]);
                    a256_3 = _mm256_set1_ps(ALPHA*A[(3 + i)*lda + (k_d + k)]);


                    b256_0 = _mm256_loadu_ps(&B[(k_d + k)*ldb + (0 + j)]);
                    b256_1 = _mm256_loadu_ps(&B[(k_d + k)*ldb + (8 + j)]);

                    // FMA - Intel Haswell (2013), AMD Piledriver (2012)
                    //c256_0 = _mm256_fmadd_ps(a256_0, b256_0, c256_0);
                    //c256_1 = _mm256_fmadd_ps(a256_1, b256_0, c256_1);
                    //c256_2 = _mm256_fmadd_ps(a256_0, b256_1, c256_2);
                    //c256_3 = _mm256_fmadd_ps(a256_1, b256_1, c256_3);

                    //c256_4 = _mm256_fmadd_ps(a256_2, b256_0, c256_4);
                    //c256_5 = _mm256_fmadd_ps(a256_3, b256_0, c256_5);
                    //c256_6 = _mm256_fmadd_ps(a256_2, b256_1, c256_6);
                    //c256_7 = _mm256_fmadd_ps(a256_3, b256_1, c256_7);

                    result256 = _mm256_mul_ps(a256_0, b256_0);
                    c256_0 = _mm256_add_ps(result256, c256_0);

                    result256 = _mm256_mul_ps(a256_1, b256_0);
                    c256_1 = _mm256_add_ps(result256, c256_1);

                    result256 = _mm256_mul_ps(a256_0, b256_1);
                    c256_2 = _mm256_add_ps(result256, c256_2);

                    result256 = _mm256_mul_ps(a256_1, b256_1);
                    c256_3 = _mm256_add_ps(result256, c256_3);


                    result256 = _mm256_mul_ps(a256_2, b256_0);
                    c256_4 = _mm256_add_ps(result256, c256_4);

                    result256 = _mm256_mul_ps(a256_3, b256_0);
                    c256_5 = _mm256_add_ps(result256, c256_5);

                    result256 = _mm256_mul_ps(a256_2, b256_1);
                    c256_6 = _mm256_add_ps(result256, c256_6);

                    result256 = _mm256_mul_ps(a256_3, b256_1);
                    c256_7 = _mm256_add_ps(result256, c256_7);
                }
                _mm256_storeu_ps(&C[(0 + i)*ldc + (0 + j)], c256_0);
                _mm256_storeu_ps(&C[(1 + i)*ldc + (0 + j)], c256_1);
                _mm256_storeu_ps(&C[(0 + i)*ldc + (8 + j)], c256_2);
                _mm256_storeu_ps(&C[(1 + i)*ldc + (8 + j)], c256_3);

                _mm256_storeu_ps(&C[(2 + i)*ldc + (0 + j)], c256_4);
                _mm256_storeu_ps(&C[(3 + i)*ldc + (0 + j)], c256_5);
                _mm256_storeu_ps(&C[(2 + i)*ldc + (8 + j)], c256_6);
                _mm256_storeu_ps(&C[(3 + i)*ldc + (8 + j)], c256_7);
            }

            for (j = (N / TILE_N)*TILE_N; j < N; ++j) {
                for (i_d = i; i_d < (i + TILE_M); ++i_d)
                {
                    for (k_d = k; k_d < (k + TILE_K); ++k_d)
                    {
                        PUT_IN_REGISTER float A_PART = ALPHA*A[i_d*lda + k_d];
                        C[i_d*ldc + j] += A_PART*B[k_d*ldb + j];
                    }
                }
            }
        }

        for (k = (K / TILE_K)*TILE_K; k < K; ++k)
        {
            for (i_d = i; i_d < (i + TILE_M); ++i_d)
            {
                PUT_IN_REGISTER float A_PART = ALPHA*A[i_d*lda + k];
                for (j = 0; j < N; ++j) {
                    C[i_d*ldc + j] += A_PART*B[k*ldb + j];
                }
            }
        }
    }

    for (i = (M / TILE_M)*TILE_M; i < M; ++i) {
        int j, k;
        for (k = 0; k < K; ++k) {
            PUT_IN_REGISTER float A_PART = ALPHA*A[i*lda + k];
            for (j = 0; j < N; ++j) {
                C[i*ldc + j] += A_PART*B[k*ldb + j];
            }
        }
    }
}



void gemm_nn_bin_32bit_packed(int M, int N, int K, float ALPHA,
    uint32_t *A, int lda,
    uint32_t *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < M; ++i) {   // l.n
        int j, s;
        float mean_val = mean_arr[i];
        //printf(" l.mean_arr[i] = %d \n ", l.mean_arr[i]);
        for (s = 0; s < K; ++s) // l.size*l.size*l.c/32  or (l.size*l.size*l.c)
        {
            PUT_IN_REGISTER uint32_t A_PART = A[i*lda + s];
            __m256i a256 = _mm256_set1_epi32(A_PART);

            for (j = 0; j < N - 8; j += 8)
            {
                __m256i b256 = *((__m256i*)&B[s*ldb + j]);
                __m256i xor256 = _mm256_xor_si256(a256, b256);  // xor(a,b)
                __m256i all_1 = _mm256_set1_epi8((char)255);
                __m256i xnor256 = _mm256_andnot_si256(xor256, all_1);   // xnor = not(xor(a,b))

                // waiting for - CPUID Flags: AVX512VPOPCNTDQ: __m512i _mm512_popcnt_epi32(__m512i a)
                __m256 count = _mm256_setr_ps(
                    popcnt_32(_mm256_extract_epi32(xnor256, 0)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 1)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 2)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 3)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 4)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 5)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 6)),
                    popcnt_32(_mm256_extract_epi32(xnor256, 7)));

                __m256 val2 = _mm256_set1_ps(2);
                count = _mm256_mul_ps(count, val2);     // count * 2

                __m256 val32 = _mm256_set1_ps(32);
                count = _mm256_sub_ps(count, val32);    // count - 32

                __m256 mean256 = _mm256_set1_ps(mean_val);
                count = _mm256_mul_ps(count, mean256);  // count * mean_val

                __m256 c256 = *((__m256*)&C[i*ldc + j]);
                count = _mm256_add_ps(count, c256);     // c = c + count
                *((__m256*)&C[i*ldc + j]) = count;
            }

            for (; j < N; ++j) // out_h*out_w;
            {
                PUT_IN_REGISTER uint32_t B_PART = B[s*ldb + j];
                uint32_t xnor_result = ~(A_PART ^ B_PART);
                int32_t count = popcnt_32(xnor_result); // must be Signed int

                C[i*ldc + j] += (2 * count - 32) * mean_val;
            }
        }
    }
}

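// Reference direct convolution (single image, CHW layout, plain scalar loops);
// the vectorized binary convolution_2d() below follows the same index scheme.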
void convolution_2d_old(int w, int h, int ksize, int n, int c, int pad, int stride,
    float *weights, float *input, float *output)
{
    //const int out_h = (h + 2 * pad - ksize) / stride + 1;    // output_height=input_height for stride=1 and pad=1
    //const int out_w = (w + 2 * pad - ksize) / stride + 1;    // output_width=input_width for stride=1 and pad=1

    int fil;
    // filter index
    #pragma omp parallel for    // "omp parallel for" - automatic parallelization of loop by using OpenMP
    for (fil = 0; fil < n; ++fil) {
        //int i, f, j;
        int chan, y, x, f_y, f_x;
        // channel index
        for (chan = 0; chan < c; ++chan)
            // input - y
            for (y = 0; y < h; ++y)
                // input - x
                for (x = 0; x < w; ++x)
                {
                    int const output_index = fil*w*h + y*w + x;
                    int const weights_pre_index = fil*c*ksize*ksize + chan*ksize*ksize;
                    int const input_pre_index = chan*w*h;
                    float sum = 0;

                    // filter - y
                    for (f_y = 0; f_y < ksize; ++f_y)
                    {
                        int input_y = y + f_y - pad;
                        // filter - x
                        for (f_x = 0; f_x < ksize; ++f_x)
                        {
                            int input_x = x + f_x - pad;
                            if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue;

                            int input_index = input_pre_index + input_y*w + input_x;
                            int weights_index = weights_pre_index + f_y*ksize + f_x;

                            sum += input[input_index] * weights[weights_index];
                        }
                    }
                    // l.output[filters][width][height] +=
                    //        state.input[channels][width][height] *
                    //        l.weights[filters][channels][filter_width][filter_height];
                    output[output_index] += sum;
                }
    }
}

void convolution_2d(int w, int h, int ksize, int n, int c, int pad, int stride,
    float *weights, float *input, float *output, float *mean)
{
    //const int out_h = (h + 2 * pad - ksize) / stride + 1;    // output_height=input_height for stride=1 and pad=1
    //const int out_w = (w + 2 * pad - ksize) / stride + 1;    // output_width=input_width for stride=1 and pad=1
    int i;

#if defined(_OPENMP)
    static int max_num_threads = 0;
    if (max_num_threads == 0) {
        max_num_threads = omp_get_max_threads();
        //omp_set_num_threads( max_num_threads / 2);
    }
#endif

    //convolution_2d_old(w, h, ksize, n, c, pad, stride, weights, input, output);

    __m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
    for (i = 0; i < ksize*ksize*n*c; i += 8) {
        *((__m256*)&weights[i]) = _mm256_and_ps(*((__m256*)&weights[i]), _mm256_castsi256_ps(all256_sing1));
    }

    //for (i = 0; i < w*h*c; i += 8) {
        //*((__m256*)&input[i]) = _mm256_and_ps(*((__m256*)&input[i]), _mm256_castsi256_ps(all256_sing1));
    //}


    //__m256i all256_last_zero = _mm256_set1_epi32(0xFFFFFFFF);
    //all256_last_zero.m256i_i32[7] = 0;
    __m256i all256_last_zero =
        _mm256_set_epi32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x0);

    __m256i idx256 = _mm256_set_epi32(0, 7, 6, 5, 4, 3, 2, 1);
    //__m256 all256_sing1 = _mm256_set1_ps(0x80000000);
    __m256 all256_one = _mm256_set1_ps(1);
    __m256i all256i_one = _mm256_set1_epi32(1);

    ///__m256i src256 = _mm256_loadu_si256((__m256i *)(&src[i]));
    ///__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats

    int fil;
    // filter index
    #pragma omp parallel for    // "omp parallel for" - automatic parallelization of loop by using OpenMP
    for (fil = 0; fil < n; ++fil) {
        int chan, y, x, f_y, f_x;
        float cur_mean = fabsf(mean[fil]);
        __m256 mean256 = _mm256_set1_ps(cur_mean);
        // channel index
        //for (chan = 0; chan < c; ++chan)
            // input - y
            for (y = 0; y < h; ++y)
                // input - x
                for (x = 0; x < w - 8; x += 8)
                {
                    int const output_index = fil*w*h + y*w + x;
                    float sum = 0;
                    __m256 sum256 = _mm256_set1_ps(0);

                    for (chan = 0; chan < c; ++chan) {
                        int const weights_pre_index = fil*c*ksize*ksize + chan*ksize*ksize;
                        int const input_pre_index = chan*w*h;


                        // filter - y
                        for (f_y = 0; f_y < ksize; ++f_y)
                        {
                            int input_y = y + f_y - pad;
                            //__m256 in = *((__m256*)&input[input_pre_index + input_y*w]);
                            if (input_y < 0 || input_y >= h) continue;
                            //__m256 in = _mm256_loadu_ps(&input[input_pre_index + input_y*w + x - pad]);

                            // filter - x
                            for (f_x = 0; f_x < ksize; ++f_x)
                            {
                                int input_x = x + f_x - pad;
                                //if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue;

                                int input_index = input_pre_index + input_y*w + input_x;
                                int weights_index = weights_pre_index + f_y*ksize + f_x;
                                //if (input_y < 0 || input_y >= h) continue;

                                //sum += input[input_index] * weights[weights_index];

                                __m256 in = *((__m256*)&input[input_index]);
                                __m256 w = _mm256_set1_ps(weights[weights_index]);
                                //__m256 w_sign = _mm256_and_ps(w, _mm256_castsi256_ps(all256_sing1)); // check sign in 8 x 32-bit floats
                                __m256 xor256 = _mm256_xor_ps(w, in);
                                //printf("\n xor256_1 = %f, xor256_2 = %f \n", xor256.m256_f32[0], xor256.m256_f32[1]);
                                //printf("\n in = %f, w = %f, xor256 = %f \n", in.m256_f32[0], w_sign.m256_f32[0], xor256.m256_f32[0]);

                                //__m256 pn1 = _mm256_and_ps(_mm256_castsi256_ps(all256i_one), xor256);


                                //sum256 = xor256;
                                sum256 = _mm256_add_ps(xor256, sum256);
                                //printf("\n --- \n");
                                //printf("\n 0 = %f, 1 = %f, 2 = %f, 3 = %f, 4 = %f, 5 = %f, 6 = %f, 7 = %f \n", in.m256_f32[0], in.m256_f32[1], in.m256_f32[2], in.m256_f32[3], in.m256_f32[4], in.m256_f32[5], in.m256_f32[6], in.m256_f32[7]);

                                if (f_x < ksize - 1) {
                                    //in = _mm256_permutevar8x32_ps(in, idx256);
                                    //in = _mm256_and_ps(in, _mm256_castsi256_ps(all256_last_zero));
                                }
                            }
                        }
                    }
                    // l.output[filters][width][height] +=
                    //        state.input[channels][width][height] *
                    //        l.weights[filters][channels][filter_width][filter_height];
                    //output[output_index] += sum;

                    sum256 = _mm256_mul_ps(sum256, mean256);
                    //printf("\n cur_mean = %f, sum256 = %f, sum256 = %f, in = %f \n",
                    //    cur_mean, sum256.m256_f32[0], sum256.m256_f32[1], input[input_pre_index]);

                    //__m256 out = *((__m256*)&output[output_index]);
                    //out = _mm256_add_ps(out, sum256);
                    //*((__m256*)&output[output_index]) = out;
                    *((__m256*)&output[output_index]) = sum256;

                    //_mm256_storeu_ps(&C[i*ldc + j], result256);
                }
    }
}


// http://graphics.stanford.edu/~seander/bithacks.html
// https://stackoverflow.com/questions/17354971/fast-counting-the-number-of-set-bits-in-m128i-register
// https://arxiv.org/pdf/1611.07612.pdf

static inline int popcnt128(__m128i n) {
    const __m128i n_hi = _mm_unpackhi_epi64(n, n);
#if defined(_MSC_VER)
    return __popcnt64(_mm_cvtsi128_si64(n)) + __popcnt64(_mm_cvtsi128_si64(n_hi));
#elif defined(__APPLE__) && defined(__clang__)
    return _mm_popcnt_u64(_mm_cvtsi128_si64(n)) + _mm_popcnt_u64(_mm_cvtsi128_si64(n_hi));
#else
    return __popcntq(_mm_cvtsi128_si64(n)) + __popcntq(_mm_cvtsi128_si64(n_hi));
#endif
}

static inline int popcnt256(__m256i n) {
    return popcnt128(_mm256_extractf128_si256(n, 0)) + popcnt128(_mm256_extractf128_si256(n, 1));
}

static inline __m256i count256(__m256i v) {
    __m256i lookup =
        _mm256_setr_epi8(0, 1, 1, 2, 1, 2, 2, 3, 1, 2,
            2, 3, 2, 3, 3, 4, 0, 1, 1, 2, 1, 2, 2, 3,
            1, 2, 2, 3, 2, 3, 3, 4);

    __m256i low_mask = _mm256_set1_epi8(0x0f);

    __m256i lo = _mm256_and_si256(v, low_mask);
    __m256i hi = _mm256_and_si256(_mm256_srli_epi32(v, 4), low_mask);
    __m256i popcnt1 = _mm256_shuffle_epi8(lookup, lo);
    __m256i popcnt2 = _mm256_shuffle_epi8(lookup, hi);
    __m256i total = _mm256_add_epi8(popcnt1, popcnt2);

    return _mm256_sad_epu8(total, _mm256_setzero_si256());
}
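
// count256() is Mula's AVX2 popcount: per-nibble bit counts are gathered with a
// _mm256_shuffle_epi8 table lookup, and _mm256_sad_epu8 reduces the byte sums
// into four 64-bit lanes; get_count_mula() below adds up those lanes.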

static inline int popcnt256_custom(__m256i n) {
    __m256i val = count256(n);

    //return val.m256i_i64[0] +
    //val.m256i_i64[1] +
    //val.m256i_i64[2] +
    //val.m256i_i64[3];
    return _mm256_extract_epi64(val, 0)
        + _mm256_extract_epi64(val, 1)
        + _mm256_extract_epi64(val, 2)
        + _mm256_extract_epi64(val, 3);
}

static inline void xnor_avx2_popcnt(__m256i a_bit256, __m256i b_bit256, __m256i *count_sum) {
    __m256i c_bit256 = _mm256_set1_epi8((char)255);

    __m256i xor256 = _mm256_xor_si256(a_bit256, b_bit256);  // xnor = not(xor(a,b))
    c_bit256 = _mm256_andnot_si256(xor256, c_bit256);   // could be optimized: apply the NOT to the weights once in advance instead of here

    *count_sum = _mm256_add_epi64(count256(c_bit256), *count_sum);  // 1st part of Mula's popcount algorithm
}

// 2nd part of Mula's popcount algorithm
static inline int get_count_mula(__m256i count_sum) {
    return _mm256_extract_epi64(count_sum, 0)
        + _mm256_extract_epi64(count_sum, 1)
        + _mm256_extract_epi64(count_sum, 2)
        + _mm256_extract_epi64(count_sum, 3);
}

// ~5x faster than float32 gemm()
// further optimization: do the mean multiplication only for the last layer
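// The kernel below unrolls filters (i) and output pixels (j) by 2 each, so the
// two A rows and two B rows loaded per iteration feed all four i/j combinations
// into four independent XNOR-popcount accumulators (count_sum_0..count_sum_3).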
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
    unsigned char *A, int lda,
    unsigned char *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int i;

#if defined(_OPENMP)
    static int max_num_threads = 0;
    if (max_num_threads == 0) {
        max_num_threads = omp_get_max_threads();
        //omp_set_num_threads(max_num_threads / 2);
    }
#endif

    //#pragma omp parallel for
    //for (i = 0; i < M; ++i)
    #pragma omp parallel for
    for (i = 0; i < (M/2)*2; i += 2)
    {   // l.n - filters [16 - 55 - 1024]
        float mean_val_0 = mean_arr[i + 0];
        float mean_val_1 = mean_arr[i + 1];
        int j, k;
        //__m256i all_1 = _mm256_set1_epi8(255);

        //for (j = 0; j < N; ++j)
        for (j = 0; j < (N/2)*2; j += 2)
        {   // out_h*out_w - one channel output size [169 - 173056]
            //int count = 0;
            const int bit_step = 256;
            __m256i count_sum_0 = _mm256_set1_epi8(0);
            __m256i count_sum_1 = _mm256_set1_epi8(0);
            __m256i count_sum_2 = _mm256_set1_epi8(0);
            __m256i count_sum_3 = _mm256_set1_epi8(0);

            for (k = 0; k < K; k += bit_step) {   // l.size*l.size*l.c - one filter size [27 - 9216]

                __m256i a_bit256_0 = _mm256_loadu_si256((__m256i *)(A + ((i + 0)*lda + k) / 8));
                __m256i b_bit256_0 = _mm256_loadu_si256((__m256i *)(B + ((j + 0)*ldb + k) / 8));

                __m256i a_bit256_1 = _mm256_loadu_si256((__m256i *)(A + ((i + 1)*lda + k) / 8));
                __m256i b_bit256_1 = _mm256_loadu_si256((__m256i *)(B + ((j + 1)*ldb + k) / 8));


                xnor_avx2_popcnt(a_bit256_0, b_bit256_0, &count_sum_0);
                xnor_avx2_popcnt(a_bit256_0, b_bit256_1, &count_sum_1);

                xnor_avx2_popcnt(a_bit256_1, b_bit256_0, &count_sum_2);
                xnor_avx2_popcnt(a_bit256_1, b_bit256_1, &count_sum_3);

                //count += popcnt256(c_bit256);
                //binary_int64_printf(c_bit64);
                //printf(", count = %d \n\n", tmp_count);
            }

            int count_0 = get_count_mula(count_sum_0);
            int count_1 = get_count_mula(count_sum_1);
            int count_2 = get_count_mula(count_sum_2);
            int count_3 = get_count_mula(count_sum_3);

            const int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
            count_0 = count_0 - f1;    // remove extra bits (from empty space for align only)
            count_1 = count_1 - f1;
            count_2 = count_2 - f1;
            count_3 = count_3 - f1;
            C[i*ldc + (j + 0)] = (2 * count_0 - K) * mean_val_0;
            C[i*ldc + (j + 1)] = (2 * count_1 - K) * mean_val_0;
            C[(i + 1)*ldc + (j + 0)] = (2 * count_2 - K) * mean_val_1;
            C[(i + 1)*ldc + (j + 1)] = (2 * count_3 - K) * mean_val_1;
        }

        int i_d;
        for (i_d = 0; i_d < 2; ++i_d)
        {
            float mean_val = mean_arr[i + i_d];
            for (j = (N / 2) * 2; j < N; j += 1)
            {   // out_h*out_w - one channel output size [169 - 173056]
                const int bit_step = 256;
                __m256i count_sum = _mm256_set1_epi8(0);

                for (k = 0; k < K; k += bit_step) {   // l.size*l.size*l.c - one filter size [27 - 9216]
                    __m256i a_bit256_0 = _mm256_loadu_si256((__m256i *)(A + ((i + i_d + 0)*lda + k) / 8));
                    __m256i b_bit256_0 = _mm256_loadu_si256((__m256i *)(B + ((j + 0)*ldb + k) / 8));
                    xnor_avx2_popcnt(a_bit256_0, b_bit256_0, &count_sum);
                }
                int count = get_count_mula(count_sum);
                const int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
                count = count - f1;    // remove extra bits (from empty space for align only)
                C[(i + i_d)*ldc + j] = (2 * count - K) * mean_val;
            }
        }
    }

    for (i = (M / 2) * 2; i < M; i += 1)
    {
        float mean_val = mean_arr[i];
        int j, k;
        for (j = 0; j < N; j += 1)
        {   // out_h*out_w - one channel output size [169 - 173056]
            const int bit_step = 256;
            __m256i count_sum = _mm256_set1_epi8(0);

            for (k = 0; k < K; k += bit_step) {   // l.size*l.size*l.c - one filter size [27 - 9216]
                __m256i a_bit256_0 = _mm256_loadu_si256((__m256i *)(A + ((i + 0)*lda + k) / 8));
                __m256i b_bit256_0 = _mm256_loadu_si256((__m256i *)(B + ((j + 0)*ldb + k) / 8));
                xnor_avx2_popcnt(a_bit256_0, b_bit256_0, &count_sum);
            }
            int count = get_count_mula(count_sum);
            const int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
            count = count - f1;    // remove extra bits (from empty space for align only)
            C[i*ldc + j] = (2 * count - K) * mean_val;
        }
    }
}




//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
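// im2col lowers convolution to GEMM: each output pixel gets a column holding its
// ksize*ksize*channels receptive field, so data_col has channels*ksize*ksize
// rows by height_col*width_col columns. This variant additionally writes the
// matrix transposed, with each row padded out to ldb_align elements, as the
// binary GEMM above expects.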
void im2col_cpu_custom_transpose(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col, int ldb_align)
{
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int channels_col = channels * ksize * ksize;
    int c;

    // optimized version
    if (height_col == height && width_col == width && stride == 1 && pad == 1)
    {
        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            int w_offset = c % ksize;
            int h_offset = (c / ksize) % ksize;
            int c_im = c / ksize / ksize;
            for (h = pad; h < height_col - pad; ++h) {
                for (w = pad; w < width_col - pad - 8; w += 8) {    // "- 8": each iteration writes 8 pixels, like the siblings below
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = (h * width_col + w)*ldb_align + c;   // transposed & aligned

                    //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                    __m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
                    data_col[col_index + ldb_align * 0] = _mm256_extract_float32(src256, 0);// src256.m256_f32[0];
                    data_col[col_index + ldb_align * 1] = _mm256_extract_float32(src256, 1);// src256.m256_f32[1];
                    data_col[col_index + ldb_align * 2] = _mm256_extract_float32(src256, 2);// src256.m256_f32[2];
                    data_col[col_index + ldb_align * 3] = _mm256_extract_float32(src256, 3);// src256.m256_f32[3];
                    data_col[col_index + ldb_align * 4] = _mm256_extract_float32(src256, 4);// src256.m256_f32[4];
                    data_col[col_index + ldb_align * 5] = _mm256_extract_float32(src256, 5);// src256.m256_f32[5];
                    data_col[col_index + ldb_align * 6] = _mm256_extract_float32(src256, 6);// src256.m256_f32[6];
                    data_col[col_index + ldb_align * 7] = _mm256_extract_float32(src256, 7);// src256.m256_f32[7];

                    //_mm256_storeu_ps(&data_col[col_index], src256);
                }

                for (; w < width_col - pad; ++w) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    int col_index = (h * width_col + w)*ldb_align + c;   // transposed & aligned

                    data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                }
            }

            {
                w = 0;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (h * width_col + w)*ldb_align + c;   // transposed & aligned
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }

            {
                w = width_col - 1;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (h * width_col + w)*ldb_align + c;   // transposed & aligned
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }

            {
                h = 0;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (h * width_col + w)*ldb_align + c;   // transposed & aligned
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }

            {
                h = height_col - 1;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (h * width_col + w)*ldb_align + c;   // transposed & aligned
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }
        }

    }
    else {
        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            int w_offset = c % ksize;
            int h_offset = (c / ksize) % ksize;
            int c_im = c / ksize / ksize;
            for (h = 0; h < height_col; ++h) {
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h * stride;
                    int im_col = w_offset + w * stride;

                    int col_index = (h * width_col + w)*ldb_align + c;   // transposed & aligned
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }
        }
    }
}


//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
void im2col_cpu_custom(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col)
{
    int c;
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int channels_col = channels * ksize * ksize;

    // optimized version
    if (height_col == height && width_col == width && stride == 1 && pad == 1 && is_fma_avx2())
    {
        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            int w_offset = c % ksize;
            int h_offset = (c / ksize) % ksize;
            int c_im = c / ksize / ksize;
            for (h = pad; h < height_col - pad; ++h) {
                for (w = pad; w < width_col - pad - 8; w += 8) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    int col_index = (c * height_col + h) * width_col + w;

                    //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                    __m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
                    _mm256_storeu_ps(&data_col[col_index], src256);
                }

                for (; w < width_col - pad; ++w) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    int col_index = (c * height_col + h) * width_col + w;

                    data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                }
            }

            {
                w = 0;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }

            {
                w = width_col - 1;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }

            {
                h = 0;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }

            {
                h = height_col - 1;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }
        }

    }
    else {
        //printf("\n Error: there is no non-optimized version \n");
        im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col);
    }
}

//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
void im2col_cpu_custom_align(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col, int bit_align)
{
    int c;
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int channels_col = channels * ksize * ksize;

    // optimized version
    if (height_col == height && width_col == width && stride == 1 && pad == 1 && is_fma_avx2())
    {
        int new_ldb = bit_align;

        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            int w_offset = c % ksize;
            int h_offset = (c / ksize) % ksize;
            int c_im = c / ksize / ksize;
            for (h = pad; h < height_col - pad; ++h) {
                for (w = pad; w < width_col - pad - 8; w += 8) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                    __m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
                    _mm256_storeu_ps(&data_col[col_index], src256);
                }

                for (; w < width_col - pad; ++w) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                }
            }

            {
                w = 0;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                }
            }

            {
                w = width_col - 1;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                }
            }

            {
                h = 0;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                }
            }

            {
                h = height_col - 1;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                }
            }
        }

    }
    else {
        printf("\n Error: there is no non-optimized version \n");
        //im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after float_to_bin
        // float_to_bit(b, t_input, src_size);
        // transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8);
    }
}
1628
1629
1630 //From Berkeley Vision's Caffe!
1631 //https://github.com/BVLC/caffe/blob/master/LICENSE
void im2col_cpu_custom_bin(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col, int bit_align)
{
    int c;
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int channels_col = channels * ksize * ksize;

    // optimized version
    if (height_col == height && width_col == width && stride == 1 && pad == 1 && is_fma_avx2())
    {
        __m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
        __m256 float_zero256 = _mm256_set1_ps(0.00);

        int new_ldb = bit_align;

        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            int w_offset = c % ksize;
            int h_offset = (c / ksize) % ksize;
            int c_im = c / ksize / ksize;
            for (h = pad; h < height_col - pad; ++h) {
                for (w = pad; w < width_col - pad - 8; w += 8) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    //__m256i src256 = _mm256_loadu_si256((__m256i *)(&data_im[im_col + width*(im_row + height*c_im)]));
                    //__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats
                    //uint16_t mask = _mm256_movemask_ps(_mm256_castsi256_ps(result256)); // (val >= 0) ? 0 : 1
                    //mask = ~mask; // inverse mask, (val >= 0) ? 1 : 0

                    __m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
                    __m256 result256 = _mm256_cmp_ps(src256, float_zero256, _CMP_GT_OS);
                    uint16_t mask = _mm256_movemask_ps(result256); // (val > 0) ? 1 : 0

                    uint16_t* dst_ptr = (uint16_t*)&((uint8_t*)data_col)[col_index / 8];
                    *dst_ptr |= (mask << (col_index % 8));
                }

                for (; w < width_col - pad; ++w) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                    float val = data_im[im_col + width*(im_row + height*c_im)];
                    if (val > 0) set_bit((unsigned char* const)data_col, col_index);
                }
            }

            {
                w = 0;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char* const)data_col, col_index);
                }
            }

            {
                w = width_col - 1;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char* const)data_col, col_index);
                }
            }

            {
                h = 0;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char* const)data_col, col_index);
                }
            }

            {
                h = height_col - 1;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char* const)data_col, col_index);
                }
            }
        }

    }
    else {
        printf("\n Error: no non-optimized version exists \n");
        //im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after float_to_bin
        // float_to_bit(b, t_input, src_size);
        // transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8);
    }
}

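/* A minimal illustrative sketch (not called by anything here): the
   _mm256_cmp_ps + _mm256_movemask_ps pair above packs eight (x > 0) tests
   into one byte, LSB-first. Because col_index is not byte-aligned in general,
   the kernel above ORs that byte in through a uint16_t pointer so the shifted
   mask may straddle two adjacent bytes. */
static inline unsigned char sign_mask8_scalar_example(const float *x)
{
    unsigned char mask = 0;
    int i;
    for (i = 0; i < 8; ++i) {
        if (x[i] > 0) mask |= (unsigned char)(1 << i);  // bit i <- (x[i] > 0)
    }
    return mask; // equals (unsigned char)_mm256_movemask_ps(_mm256_cmp_ps(v, zero, _CMP_GT_OS))
}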

void activate_array_cpu_custom(float *x, const int n, const ACTIVATION a)
{
    int i = 0;
    if (a == LINEAR)
    {}
    else if (a == LEAKY)
    {
        if (is_fma_avx2()) {
            __m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
            __m256 all256_01 = _mm256_set1_ps(0.1F);

            for (i = 0; i < n - 8; i += 8) {
                //x[i] = (x[i]>0) ? x[i] : .1*x[i];

                __m256 src256 = _mm256_loadu_ps(&x[i]);
                __m256 mult256 = _mm256_mul_ps((src256), all256_01); // mult * 0.1

                __m256i sign256 = _mm256_and_si256(_mm256_castps_si256(src256), all256_sing1); // check sign in 8 x 32-bit floats

                __m256 result256 = _mm256_blendv_ps(src256, mult256, _mm256_castsi256_ps(sign256)); // sign bit set (x < 0) ? mult : src
                _mm256_storeu_ps(&x[i], result256);
            }
        }

        for (; i < n; ++i) {
            x[i] = (x[i]>0) ? x[i] : .1*x[i];
        }
    }
    else {
        for (i = 0; i < n; ++i) {
            x[i] = activate(x[i], a);
        }
    }
}
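
/* Scalar sketch of the blend above (illustrative): selecting by the sign bit
   is value-equivalent to the ternary below; the one divergent input,
   x == -0.0f, takes the multiplied branch, and 0.1f * -0.0f is still -0.0f. */
static inline float leaky_relu_scalar_example(float x)
{
    return (x > 0) ? x : 0.1f * x;
}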

void float_to_bit(float *src, unsigned char *dst, size_t size)
{
    size_t dst_size = size / 8 + 1;
    memset(dst, 0, dst_size);

    size_t i;
    //__m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
    __m256 float_zero256 = _mm256_set1_ps(0.0);

    for (i = 0; i < size; i+=8)
    {
        //__m256i src256 = _mm256_loadu_si256((__m256i *)(&src[i]));
        //__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats
        //uint32_t mask = _mm256_movemask_ps(_mm256_castsi256_ps(result256)); // (val >= 0) ? 0 : 1
        ////mask = ~mask; // inverse mask, (val >= 0) ? 1 : 0

        __m256 src256 = _mm256_loadu_ps((float *)(&src[i]));
        __m256 result256 = _mm256_cmp_ps(src256, float_zero256, _CMP_GT_OS);
        uint32_t mask = _mm256_movemask_ps(result256); // (val > 0) ? 1 : 0

        dst[i / 8] = mask;
    }
}
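
/* Worked example for float_to_bit() (illustrative; the vector loop above
   assumes size is a multiple of 8, so a full 8-float group is used here):
   sign bits are packed LSB-first, 8 floats per byte. */
static void float_to_bit_example(void)
{
    float src[8] = { 1, -1, 2, -2, 3, -3, 4, -4 };
    unsigned char dst[2] = { 0 };
    float_to_bit(src, dst, 8);
    // positive lanes 0, 2, 4 and 6 set bits 0, 2, 4 and 6: dst[0] == 0x55
}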

static inline void transpose4x4_SSE(float *A, float *B, const int lda, const int ldb)
{
    __m128 row1 = _mm_loadu_ps(&A[0 * lda]);
    __m128 row2 = _mm_loadu_ps(&A[1 * lda]);
    __m128 row3 = _mm_loadu_ps(&A[2 * lda]);
    __m128 row4 = _mm_loadu_ps(&A[3 * lda]);
    _MM_TRANSPOSE4_PS(row1, row2, row3, row4);
    _mm_storeu_ps(&B[0 * ldb], row1);
    _mm_storeu_ps(&B[1 * ldb], row2);
    _mm_storeu_ps(&B[2 * ldb], row3);
    _mm_storeu_ps(&B[3 * ldb], row4);
}
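
/* Usage sketch (illustrative): transpose one 4x4 tile of a row-major matrix.
   lda/ldb are the row strides, in floats, of the source and destination. */
static void transpose4x4_example(void)
{
    float A[16] = {  1,  2,  3,  4,
                     5,  6,  7,  8,
                     9, 10, 11, 12,
                    13, 14, 15, 16 };
    float B[16];
    transpose4x4_SSE(A, B, 4, 4);
    // B == { 1,5,9,13,  2,6,10,14,  3,7,11,15,  4,8,12,16 }
}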

void transpose_block_SSE4x4(float *A, float *B, const int n, const int m,
    const int lda, const int ldb, const int block_size)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < n; i += block_size) {
        int j, i2, j2;
        //int max_i2 = (i + block_size < n) ? (i + block_size) : n;
        if (i + block_size < n) {
            int max_i2 = i + block_size;
            for (j = 0; j < m; j += block_size) {
                //int max_j2 = (j + block_size < m) ? (j + block_size) : m;
                if (j + block_size < m) {
                    int max_j2 = j + block_size;
                    for (i2 = i; i2 < max_i2; i2 += 4) {
                        for (j2 = j; j2 < max_j2; j2 += 4) {
                            transpose4x4_SSE(&A[i2*lda + j2], &B[j2*ldb + i2], lda, ldb);
                        }
                    }
                }
                else {
                    for (i2 = i; i2 < max_i2; ++i2) {
                        for (j2 = j; j2 < m; ++j2) {
                            B[j2*ldb + i2] = A[i2*lda + j2];
                        }
                    }
                }
            }
        }
        else {
            for (i2 = i; i2 < n; ++i2) {
                for (j2 = 0; j2 < m; ++j2) {
                    B[j2*ldb + i2] = A[i2*lda + j2];
                }
            }
        }
    }
}


void forward_maxpool_layer_avx(float *src, float *dst, int *indexes, int size, int w, int h, int out_w, int out_h, int c,
    int pad, int stride, int batch)
{

    const int w_offset = -pad / 2;
    const int h_offset = -pad / 2;
    int b, k;

    for (b = 0; b < batch; ++b) {
        #pragma omp parallel for
        for (k = 0; k < c; ++k) {
            int i, j, m, n;
            for (i = 0; i < out_h; ++i) {
                //for (j = 0; j < out_w; ++j) {
                j = 0;

                if(stride == 1 && is_avx() == 1) {
                    for (j = 0; j < out_w - 8 - (size - 1); j += 8) {
                        int out_index = j + out_w*(i + out_h*(k + c*b));
                        __m256 max256 = _mm256_set1_ps(-FLT_MAX);
                        for (n = 0; n < size; ++n) {
                            for (m = 0; m < size; ++m) {
                                int cur_h = h_offset + i*stride + n;
                                int cur_w = w_offset + j*stride + m;
                                int index = cur_w + w*(cur_h + h*(k + b*c));
                                int valid = (cur_h >= 0 && cur_h < h &&
                                    cur_w >= 0 && cur_w < w);
                                if (!valid) continue;

                                __m256 src256 = _mm256_loadu_ps(&src[index]);
                                max256 = _mm256_max_ps(src256, max256);
                            }
                        }
                        _mm256_storeu_ps(&dst[out_index], max256);

                    }
                }
                else if (size == 2 && stride == 2 && is_avx() == 1) {
                    for (j = 0; j < out_w - 4; j += 4) {
                        int out_index = j + out_w*(i + out_h*(k + c*b));
                        //float max = -FLT_MAX;
                        //int max_i = -1;
                        __m128 max128 = _mm_set1_ps(-FLT_MAX);

                        for (n = 0; n < size; ++n) {
                            //for (m = 0; m < size; ++m)
                            m = 0;
                            {
                                int cur_h = h_offset + i*stride + n;
                                int cur_w = w_offset + j*stride + m;
                                int index = cur_w + w*(cur_h + h*(k + b*c));
                                int valid = (cur_h >= 0 && cur_h < h &&
                                    cur_w >= 0 && cur_w < w);
                                if (!valid) continue;

                                __m256 src256 = _mm256_loadu_ps(&src[index]);
                                __m256 src256_2 = _mm256_permute_ps(src256, (1 << 0) | (3 << 4));
                                __m256 max256 = _mm256_max_ps(src256, src256_2);

                                __m128 src128_0 = _mm256_extractf128_ps(max256, 0);
                                __m128 src128_1 = _mm256_extractf128_ps(max256, 1);
                                __m128 src128 = _mm_shuffle_ps(src128_0, src128_1, (2 << 2) | (2 << 6));

                                max128 = _mm_max_ps(src128, max128);
                            }
                        }
                        _mm_storeu_ps(&dst[out_index], max128);
                    }
                }

                for (; j < out_w; ++j) {
                    int out_index = j + out_w*(i + out_h*(k + c*b));
                    float max = -FLT_MAX;
                    int max_i = -1;
                    for (n = 0; n < size; ++n) {
                        for (m = 0; m < size; ++m) {
                            int cur_h = h_offset + i*stride + n;
                            int cur_w = w_offset + j*stride + m;
                            int index = cur_w + w*(cur_h + h*(k + b*c));
                            int valid = (cur_h >= 0 && cur_h < h &&
                                cur_w >= 0 && cur_w < w);
                            float val = (valid != 0) ? src[index] : -FLT_MAX;
                            max_i = (val > max) ? index : max_i;
                            max = (val > max) ? val : max;
                        }
                    }
                    dst[out_index] = max;
                    if (indexes) indexes[out_index] = max_i;
                }
            }
        }
    }
}
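
/* Layout note for forward_maxpool_layer_avx (illustrative): src and dst are
   NCHW, so out_index = j + out_w*(i + out_h*(k + c*b)) addresses output pixel
   (batch b, channel k, row i, col j). The stride == 1 vector path computes 8
   horizontally adjacent outputs per iteration; the scalar tail also records
   argmax positions for the backward pass when indexes != NULL. */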

#else // AVX

int is_avx() {
    return 0;
}

int is_fma_avx2() {
    return 0;
}

void gemm_nn(int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float *C, int ldc)
{
    int i, j, k;
    for (i = 0; i < M; ++i) {
        for (k = 0; k < K; ++k) {
            PUT_IN_REGISTER float A_PART = ALPHA * A[i * lda + k];
            for (j = 0; j < N; ++j) {
                C[i*ldc + j] += A_PART*B[k*ldb + j];
            }
        }
    }
}
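
/* Minimal usage sketch (illustrative, not called anywhere): C += ALPHA * A*B
   for row-major 2x2 matrices, leading dimension 2. */
static void gemm_nn_example(void)
{
    float A[4] = { 1, 2,
                   3, 4 };
    float B[4] = { 5, 6,
                   7, 8 };
    float C[4] = { 0 };
    gemm_nn(2, 2, 2, 1.0f, A, 2, B, 2, C, 2);
    // C == { 19, 22, 43, 50 }
}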

void gemm_nn_fast(int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float *C, int ldc)
{
    int i, j, k;
    #pragma omp parallel for
    for (i = 0; i < M; ++i) {
        for (k = 0; k < K; ++k) {
            PUT_IN_REGISTER float A_PART = ALPHA*A[i*lda + k];
            for (j = 0; j < N; ++j) {
                C[i*ldc + j] += A_PART*B[k*ldb + j];
            }
        }
    }
}

void gemm_nn_bin_32bit_packed(int M, int N, int K, float ALPHA,
    uint32_t *A, int lda,
    uint32_t *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < M; ++i) {   // l.n
        int j, s;
        float mean_val = mean_arr[i];
        //printf(" l.mean_arr[i] = %d \n ", l.mean_arr[i]);
        for (s = 0; s < K; ++s) // l.size*l.size*l.c/32 or (l.size*l.size*l.c)
        {
            //PUT_IN_REGISTER float A_PART = 1*a[i*k + s];
            PUT_IN_REGISTER uint32_t A_PART = A[i * lda + s];
            for (j = 0; j < N; ++j) // out_h*out_w;
            {
                //c[i*n + j] += A_PART*b[s*n + j];
                PUT_IN_REGISTER uint32_t B_PART = B[s * ldb + j];
                uint32_t xnor_result = ~(A_PART ^ B_PART);
                //printf(" xnor_result = %d, ", xnor_result);
                int32_t count = popcnt_32(xnor_result); // must be a signed int

                C[i*ldc + j] += (2 * count - 32) * mean_val;
                //c[i*n + j] += count*mean;
            }
        }
    }
}
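
/* Worked example for the XNOR dot product above (illustrative): with +1
   encoded as bit 1 and -1 as bit 0, popcnt(~(a ^ b)) counts the agreeing
   positions, so the 32-element +/-1 dot product is agreements minus
   disagreements = count - (32 - count) = 2*count - 32. For a == b, count = 32
   gives +32; for a == ~b, count = 0 gives -32. */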


void convolution_2d(int w, int h, int ksize, int n, int c, int pad, int stride,
    float *weights, float *input, float *output, float *mean)
{
    const int out_h = (h + 2 * pad - ksize) / stride + 1; // output_height=input_height for stride=1 and pad=1
    const int out_w = (w + 2 * pad - ksize) / stride + 1; // output_width=input_width for stride=1 and pad=1
    //int i, f, j;

    int fil;
    // filter index
    #pragma omp parallel for // automatic parallelization of the loop using OpenMP
    for (fil = 0; fil < n; ++fil) {
        int chan, y, x, f_y, f_x;
        // channel index
        for (chan = 0; chan < c; ++chan)
            // input - y
            for (y = 0; y < h; ++y)
                // input - x
                for (x = 0; x < w; ++x)
                {
                    int const output_index = fil*w*h + y*w + x;
                    int const weights_pre_index = fil*c*ksize*ksize + chan*ksize*ksize;
                    int const input_pre_index = chan*w*h;
                    float sum = 0;

                    // filter - y
                    for (f_y = 0; f_y < ksize; ++f_y)
                    {
                        int input_y = y + f_y - pad;
                        // filter - x
                        for (f_x = 0; f_x < ksize; ++f_x)
                        {
                            int input_x = x + f_x - pad;
                            if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue;

                            int input_index = input_pre_index + input_y*w + input_x;
                            int weights_index = weights_pre_index + f_y*ksize + f_x;

                            sum += input[input_index] * weights[weights_index];
                        }
                    }
                    // l.output[filters][width][height] +=
                    //        state.input[channels][width][height] *
                    //        l.weights[filters][channels][filter_width][filter_height];
                    output[output_index] += sum;
                }
    }
}

static inline int popcnt_64(uint64_t val64) {
#ifdef WIN32  // Windows
#ifdef _WIN64 // Windows 64-bit
    int tmp_count = __popcnt64(val64);
#else         // Windows 32-bit
    int tmp_count = __popcnt(val64);
    tmp_count += __popcnt(val64 >> 32);
#endif
#else   // Linux
#if defined(__x86_64__) || defined(__aarch64__)  // Linux 64-bit
    int tmp_count = __builtin_popcountll(val64);
#else   // Linux 32-bit
    int tmp_count = __builtin_popcount(val64);
    tmp_count += __builtin_popcount(val64 >> 32);
#endif
#endif
    return tmp_count;
}

void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
    unsigned char *A, int lda,
    unsigned char *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int i;

    #pragma omp parallel for
    for (i = 0; i < M; ++i) {   // l.n - filters [16 - 55 - 1024]
        int j, k;
        float mean_val = mean_arr[i];

        for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
            int count = 0;

            for (k = 0; k < K; k += 64) {   // l.size*l.size*l.c - one filter size [27 - 9216]
                uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8));
                uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8));
                uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);

                int tmp_count = popcnt_64(c_bit64);

                if (K - k < 64) tmp_count = tmp_count - (64 - (K - k));   // remove extra bits
                count += tmp_count;
                //binary_int64_printf(c_bit64);
                //printf(", count = %d \n\n", tmp_count);
            }

            C[i*ldc + j] = (2 * count - K) * mean_val;
        }
    }
}
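
/* Tail-bit note for the kernel above (reasoning, assuming the aligned bit
   buffers are zero-padded past K, as the aligned allocations in this file
   are): when K is not a multiple of 64 the last 64-bit load compares
   64 - (K - k) padding bits; xnor of two zero bits is 1, so popcnt
   over-counts by exactly that many, which the subtraction removes. */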

void im2col_cpu_custom_transpose(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col, int ldb_align)
{
    printf("\n im2col_cpu_custom_transpose() isn't implemented without AVX \n");
}

//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
void im2col_cpu_custom(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col)
{
    im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col);
    return;
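    // NOTE: unconditional fallback to im2col_cpu(); the hand-written path below is unreachable.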

    int c;
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int channels_col = channels * ksize * ksize;

    // optimized version
    if (height_col == height && width_col == width && stride == 1 && pad == 1)
    {
        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            int w_offset = c % ksize;
            int h_offset = (c / ksize) % ksize;
            int c_im = c / ksize / ksize;
            for (h = pad; h < height_col - pad; ++h) {
                for (w = pad; w < width_col - pad; ++w) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    int col_index = (c * height_col + h) * width_col + w;

                    data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                }

                for (; w < width_col - pad; ++w) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    int col_index = (c * height_col + h) * width_col + w;

                    data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                }
            }

            {
                w = 0;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }

            {
                w = width_col - 1;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }

            {
                h = 0;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }

            {
                h = height_col - 1;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    int col_index = (c * height_col + h) * width_col + w;
                    data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
                        im_row, im_col, c_im, pad);
                }
            }
        }

    }
    else {
        //printf("\n Error: no non-optimized version exists \n");
        im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col);
    }
}


//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
void im2col_cpu_custom_bin(float* data_im,
    int channels, int height, int width,
    int ksize, int stride, int pad, float* data_col, int bit_align)
{
    int c;
    const int height_col = (height + 2 * pad - ksize) / stride + 1;
    const int width_col = (width + 2 * pad - ksize) / stride + 1;
    const int channels_col = channels * ksize * ksize;

    // optimized version
    if (height_col == height && width_col == width && stride == 1 && pad == 1)
    {
        int new_ldb = bit_align;

        #pragma omp parallel for
        for (c = 0; c < channels_col; ++c) {
            int h, w;
            int w_offset = c % ksize;
            int h_offset = (c / ksize) % ksize;
            int c_im = c / ksize / ksize;
            for (h = pad; h < height_col - pad; ++h) {
                for (w = pad; w < width_col - pad - 8; w += 1) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    float val = data_im[im_col + width*(im_row + height*c_im)];
                    if (val > 0) set_bit((unsigned char*)data_col, col_index);
                }

                for (; w < width_col - pad; ++w) {
                    int im_row = h_offset + h - pad;
                    int im_col = w_offset + w - pad;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    //data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
                    float val = data_im[im_col + width*(im_row + height*c_im)];
                    if (val > 0) set_bit((unsigned char*)data_col, col_index);
                }
            }

            {
                w = 0;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char*)data_col, col_index);
                }
            }

            {
                w = width_col - 1;
                for (h = 0; h < height_col; ++h) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char*)data_col, col_index);
                }
            }

            {
                h = 0;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char*)data_col, col_index);
                }
            }

            {
                h = height_col - 1;
                for (w = 0; w < width_col; ++w) {
                    int im_row = h_offset + h;
                    int im_col = w_offset + w;
                    //int col_index = (c * height_col + h) * width_col + w;
                    int col_index = c * new_ldb + h * width_col + w;

                    //data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
                    if (val > 0) set_bit((unsigned char*)data_col, col_index);
                }
            }
        }

    }
    else {
        printf("\n Error: no non-optimized version exists \n");
        //im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after float_to_bin
        // float_to_bit(b, t_input, src_size);
        // transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8);
    }
}


void activate_array_cpu_custom(float *x, const int n, const ACTIVATION a)
{
    int i;
    if (a == LINEAR)
    {
    }
    else if (a == LEAKY)
    {
        for (i = 0; i < n; ++i) {
            x[i] = (x[i]>0) ? x[i] : .1*x[i];
        }
    }
    else {
        for (i = 0; i < n; ++i) {
            x[i] = activate(x[i], a);
        }
    }
}

void float_to_bit(float *src, unsigned char *dst, size_t size)
{
    size_t dst_size = size / 8 + 1;
    memset(dst, 0, dst_size);

    size_t i;
    char* byte_arr = (char*)xcalloc(size + 8, sizeof(char)); // padded so the 8-wide loop below cannot read past the end when size % 8 != 0
    for (i = 0; i < size; ++i) {
        if (src[i] > 0) byte_arr[i] = 1;
    }

    //for (i = 0; i < size; ++i) {
    //    dst[i / 8] |= byte_arr[i] << (i % 8);
    //}

    for (i = 0; i < size; i += 8) {
        char dst_tmp = 0;
        dst_tmp |= byte_arr[i + 0] << 0;
        dst_tmp |= byte_arr[i + 1] << 1;
        dst_tmp |= byte_arr[i + 2] << 2;
        dst_tmp |= byte_arr[i + 3] << 3;
        dst_tmp |= byte_arr[i + 4] << 4;
        dst_tmp |= byte_arr[i + 5] << 5;
        dst_tmp |= byte_arr[i + 6] << 6;
        dst_tmp |= byte_arr[i + 7] << 7;
        dst[i / 8] = dst_tmp;
    }
    free(byte_arr);
}

static inline void transpose_scalar_block(float *A, float *B, const int lda, const int ldb, const int block_size)
{
    int i;
    //#pragma omp parallel for
    for (i = 0; i<block_size; i++) {
        int j;
        for (j = 0; j<block_size; j++) {
            B[j*ldb + i] = A[i*lda + j];
        }
    }
}

void transpose_block_SSE4x4(float *A, float *B, const int n, const int m,
    const int lda, const int ldb, const int block_size)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < n; i += block_size) {
        int j, i2, j2;
        for (j = 0; j < m; j += block_size) {
            int max_i2 = i + block_size < n ? i + block_size : n;
            int max_j2 = j + block_size < m ? j + block_size : m;
            for (i2 = i; i2 < max_i2; ++i2) {
                for (j2 = j; j2 < max_j2; ++j2) {
                    B[j2*ldb + i2] = A[i2*lda + j2];
                }
            }
        }
    }
}

void forward_maxpool_layer_avx(float *src, float *dst, int *indexes, int size, int w, int h, int out_w, int out_h, int c,
    int pad, int stride, int batch)
{
    int b, k;
    const int w_offset = -pad / 2;
    const int h_offset = -pad / 2;

    for (b = 0; b < batch; ++b) {
        #pragma omp parallel for
        for (k = 0; k < c; ++k) {
            int i, j, m, n;
            for (i = 0; i < out_h; ++i) {
                for (j = 0; j < out_w; ++j) {
                    int out_index = j + out_w*(i + out_h*(k + c*b));
                    float max = -FLT_MAX;
                    int max_i = -1;
                    for (n = 0; n < size; ++n) {
                        for (m = 0; m < size; ++m) {
                            int cur_h = h_offset + i*stride + n;
                            int cur_w = w_offset + j*stride + m;
                            int index = cur_w + w*(cur_h + h*(k + b*c));
                            int valid = (cur_h >= 0 && cur_h < h &&
                                cur_w >= 0 && cur_w < w);
                            float val = (valid != 0) ? src[index] : -FLT_MAX;
                            max_i = (val > max) ? index : max_i;
                            max = (val > max) ? val : max;
                        }
                    }
                    dst[out_index] = max;
                    if (indexes) indexes[out_index] = max_i;
                }
            }
        }
    }
}

#endif // AVX


// 32 channels -> 1 channel (with 32 floats)
// 256 channels -> 8 channels (with 32 floats)
void repack_input(float *input, float *re_packed_input, int w, int h, int c)
{
    const int items_per_channel = w * h;
    int chan, i;
    for (chan = 0; chan < c; chan += 32)
    {
        for (i = 0; i < items_per_channel; ++i)
        {
            int c_pack;
            for (c_pack = 0; c_pack < 32; ++c_pack) {
                float src = input[(chan + c_pack)*items_per_channel + i];

                re_packed_input[chan*items_per_channel + i * 32 + c_pack] = src;
            }
        }
    }
}
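
/* Index sketch for repack_input (illustrative): element (channel ch, pixel i)
   moves from input[ch*w*h + i] to
   re_packed_input[(ch/32)*32*w*h + i*32 + (ch % 32)],
   i.e. each group of 32 channels is interleaved per pixel so the 32 values of
   one pixel can later be binarized into a single uint32_t. */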

void transpose_uint32(uint32_t *src, uint32_t *dst, int src_h, int src_w, int src_align, int dst_align)
{
    //l.bit_align - aligned (n) by 32
    //new_ldb - aligned (k) by 256
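    // dst_align is given in bits, so dst_align / 32 below is the destination row stride in uint32_t words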

    int i;
    //#pragma omp parallel for
    for (i = 0; i < src_h; i += 1)  // l.size*l.size*l.c;
    {
        int j;
        for (j = 0; j < src_w; j += 1)  // out_h*out_w;
        {
            ((uint32_t *)dst)[j*dst_align / 32 + i] = ((uint32_t *)src)[i*src_align + j];
        }
    }
}

void gemm_nn_bin_transposed_32bit_packed(int M, int N, int K, float ALPHA,
    uint32_t *A, int lda,
    uint32_t *B, int ldb,
    float *C, int ldc, float *mean_arr)
{
    int i;
    #pragma omp parallel for
    for (i = 0; i < M; ++i) {   // l.n
        int j, s;
        float mean_val = mean_arr[i];
        for (j = 0; j < N; ++j) // out_h*out_w;
        {
            float val = 0;
            for (s = 0; s < K; ++s) // l.size*l.size*l.c/32 or (l.size*l.size*l.c)
            {
                PUT_IN_REGISTER uint32_t A_PART = ((uint32_t*)A)[i*lda + s];
                PUT_IN_REGISTER uint32_t B_PART = ((uint32_t*)B)[j * ldb + s];
                uint32_t xnor_result = ~(A_PART ^ B_PART);
                int32_t count = popcnt_32(xnor_result); // must be a signed int

                val += (2 * count - 32) * mean_val;
            }
            C[i*ldc + j] += val;
        }
    }
}

void convolution_repacked(uint32_t *packed_input, uint32_t *packed_weights, float *output,
    int w, int h, int c, int n, int size, int pad, int new_lda, float *mean_arr)
{
    int fil;
    // filter index
    #pragma omp parallel for
    for (fil = 0; fil < n; ++fil) {
        float mean_val = mean_arr[fil];
        int chan, y, x, f_y, f_x;   // c_pack
        // channel index
        for (chan = 0; chan < c / 32; ++chan)
            //for (chan = 0; chan < l.c; chan += 32)
            //for (c_pack = 0; c_pack < 32; ++c_pack)
            // input - y
            for (y = 0; y < h; ++y)
                // input - x
                for (x = 0; x < w; ++x)
                {
                    int const output_index = fil*w*h + y*w + x;
                    float sum = 0;

                    // filter - y
                    for (f_y = 0; f_y < size; ++f_y)
                    {
                        int input_y = y + f_y - pad;
                        // filter - x
                        for (f_x = 0; f_x < size; ++f_x)
                        {
                            int input_x = x + f_x - pad;
                            if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue;

                            // normal
                            //float input = state.input[(chan + c_pack)*l.w*l.h + input_y*l.w + input_x];
                            //float weight = l.weights[fil*l.c*l.size*l.size + (chan + c_pack)*l.size*l.size + f_y*l.size + f_x];

                            // packed
                            //float input = re_packed_input[chan*l.w*l.h + (input_y*l.w + input_x) * 32 + c_pack];
                            //float weight = l.weights[fil*l.c*l.size*l.size + chan*l.size*l.size + (f_y*l.size + f_x) * 32 + c_pack];
                            //sum += input * weight;

                            //float input = re_packed_input[chan*l.w*l.h + (input_y*l.w + input_x) * 32 + c_pack];
                            //float weight = l.weights[fil*l.c*l.size*l.size + chan*l.size*l.size + (f_y*l.size + f_x) * 32 + c_pack];
                            //uint32_t bit1 = input > 0;
                            //uint32_t bit2 = weight > 0;
                            //uint32_t count = (~(bit1 ^ bit2)) & 1;
                            //float result = (2 * (float)count - 1) * mean_val;
                            //printf("\n mul = %f, bit1 = %d, bit2 = %d, count = %d, mean = %f, result = %f ", input*weight, bit1, bit2, count, mean_val, result);
                            //sum += result;

                            uint32_t input = ((uint32_t *)packed_input)[chan*w*h + input_y*w + input_x];
                            //uint32_t weight = ((uint32_t *)l.align_bit_weights)[fil*l.c*l.size*l.size/32 + chan*l.size*l.size + f_y*l.size + f_x];
                            uint32_t weight = ((uint32_t *)packed_weights)[fil*new_lda / 32 + chan*size*size + f_y*size + f_x];

                            uint32_t xnor_result = ~(input ^ weight);
                            int32_t count = popcnt_32(xnor_result); // must be a signed int
                            sum += (2 * count - 32) * mean_val;
                        }
                    }
                    // l.output[filters][width][height] +=
                    //        state.input[channels][width][height] *
                    //        l.weights[filters][channels][filter_width][filter_height];
                    output[output_index] += sum;
                }
    }
}

void gemm_nt(int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float *C, int ldc)
{
    int i,j,k;
    for(i = 0; i < M; ++i){
        for(j = 0; j < N; ++j){
            PUT_IN_REGISTER float sum = 0;
            for(k = 0; k < K; ++k){
                sum += ALPHA*A[i*lda+k]*B[j*ldb + k];
            }
            C[i*ldc+j] += sum;
        }
    }
}

void gemm_tn(int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float *C, int ldc)
{
    int i,j,k;
    for(i = 0; i < M; ++i){
        for(k = 0; k < K; ++k){
            PUT_IN_REGISTER float A_PART = ALPHA * A[k * lda + i];
            for(j = 0; j < N; ++j){
                C[i*ldc+j] += A_PART*B[k*ldb+j];
            }
        }
    }
}

void gemm_tt(int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float *C, int ldc)
{
    int i,j,k;
    for(i = 0; i < M; ++i){
        for(j = 0; j < N; ++j){
            PUT_IN_REGISTER float sum = 0;
            for(k = 0; k < K; ++k){
                sum += ALPHA*A[i+k*lda]*B[k+j*ldb];
            }
            C[i*ldc+j] += sum;
        }
    }
}


void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float BETA,
    float *C, int ldc)
{
    //printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc);
    if (BETA != 1){
        int i, j;
        for(i = 0; i < M; ++i){
            for(j = 0; j < N; ++j){
                C[i*ldc + j] *= BETA;
            }
        }
    }

    is_avx();   // initialize static variable
    if (is_fma_avx2() && !TA && !TB) {
        gemm_nn_fast(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
    }
    else {
        int t;
        #pragma omp parallel for
        for (t = 0; t < M; ++t) {
            if (!TA && !TB)
                gemm_nn(1, N, K, ALPHA, A + t*lda, lda, B, ldb, C + t*ldc, ldc);
            else if (TA && !TB)
                gemm_tn(1, N, K, ALPHA, A + t, lda, B, ldb, C + t*ldc, ldc);
            else if (!TA && TB)
                gemm_nt(1, N, K, ALPHA, A + t*lda, lda, B, ldb, C + t*ldc, ldc);
            else
                gemm_tt(1, N, K, ALPHA, A + t, lda, B, ldb, C + t*ldc, ldc);
        }
    }
}

#ifdef GPU

#include <math.h>

void gemm_ongpu(int TA, int TB, int M, int N, int K, float ALPHA,
    float *A_gpu, int lda,
    float *B_gpu, int ldb,
    float BETA,
    float *C_gpu, int ldc)
{
    cublasHandle_t handle = blas_handle();
    cudaError_t stream_status = (cudaError_t)cublasSetStream(handle, get_cuda_stream());
    CHECK_CUDA(stream_status);
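    // cuBLAS expects column-major storage while this code is row-major, so the
    // call below computes C^T = B^T * A^T (B passed first, M and N swapped);
    // the row-major C then holds ALPHA*A*B + BETA*C without an extra transpose.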
    cudaError_t status = (cudaError_t)cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N),
        (TA ? CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb, A_gpu, lda, &BETA, C_gpu, ldc);
    CHECK_CUDA(status);
}

void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA,
    float *A, int lda,
    float *B, int ldb,
    float BETA,
    float *C, int ldc)
{
    float *A_gpu = cuda_make_array(A, (TA ? lda*K : lda*M));
    float *B_gpu = cuda_make_array(B, (TB ? ldb*N : ldb*K));
    float *C_gpu = cuda_make_array(C, ldc*M);

    gemm_ongpu(TA, TB, M, N, K, ALPHA, A_gpu, lda, B_gpu, ldb, BETA, C_gpu, ldc);

    cuda_pull_array(C_gpu, C, ldc*M);
    cuda_free(A_gpu);
    cuda_free(B_gpu);
    cuda_free(C_gpu);
}

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

void time_gpu_random_matrix(int TA, int TB, int m, int k, int n)
{
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);
    int i;
    clock_t start = clock(), end;
    for(i = 0; i<32; ++i){
        gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    }
    end = clock();
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
    free(a);
    free(b);
    free(c);
}

void time_ongpu(int TA, int TB, int m, int k, int n)
{
    int iter = 10;
    float *a = random_matrix(m,k);
    float *b = random_matrix(k,n);

    int lda = (!TA)?k:m;
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);

    float *a_cl = cuda_make_array(a, m*k);
    float *b_cl = cuda_make_array(b, k*n);
    float *c_cl = cuda_make_array(c, m*n);

    int i;
    clock_t start = clock(), end;
    for(i = 0; i<iter; ++i){
        gemm_ongpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n);
        cudaDeviceSynchronize();
    }
    double flop = ((double)m)*n*(2.*k + 2.)*iter;
    double gflop = flop/pow(10., 9);
    end = clock();
    double seconds = sec(end-start);
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds);
    cuda_free(a_cl);
    cuda_free(b_cl);
    cuda_free(c_cl);
    free(a);
    free(b);
    free(c);
}


void test_gpu_accuracy(int TA, int TB, int m, int k, int n)
{
    srand(0);
    float *a;
    if(!TA) a = random_matrix(m,k);
    else a = random_matrix(k,m);
    int lda = (!TA)?k:m;
    float *b;
    if(!TB) b = random_matrix(k,n);
    else b = random_matrix(n,k);
    int ldb = (!TB)?n:k;

    float *c = random_matrix(m,n);
    float *c_gpu = random_matrix(m,n);
    memset(c, 0, m*n*sizeof(float));
    memset(c_gpu, 0, m*n*sizeof(float));
    int i;
    //pm(m,k,b);
    gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c_gpu,n);
    //printf("GPU\n");
    //pm(m, n, c_gpu);

    gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
    //printf("\n\nCPU\n");
    //pm(m, n, c);
    double sse = 0;
    for(i = 0; i < m*n; ++i) {
        //printf("%f %f\n", c[i], c_gpu[i]);
        sse += pow(c[i]-c_gpu[i], 2);
    }
    printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n",m,k,k,n, TA, TB, sse/(m*n));
    free(a);
    free(b);
    free(c);
    free(c_gpu);
}

int test_gpu_blas()
{
    /*
    test_gpu_accuracy(0,0,10,576,75);

    test_gpu_accuracy(0,0,17,10,10);
    test_gpu_accuracy(1,0,17,10,10);
    test_gpu_accuracy(0,1,17,10,10);
    test_gpu_accuracy(1,1,17,10,10);

    test_gpu_accuracy(0,0,1000,10,100);
    test_gpu_accuracy(1,0,1000,10,100);
    test_gpu_accuracy(0,1,1000,10,100);
    test_gpu_accuracy(1,1,1000,10,100);

    test_gpu_accuracy(0,0,10,10,10);

    time_ongpu(0,0,64,2916,363);
    time_ongpu(0,0,64,2916,363);
    time_ongpu(0,0,64,2916,363);
    time_ongpu(0,0,192,729,1600);
    time_ongpu(0,0,384,196,1728);
    time_ongpu(0,0,256,196,3456);
    time_ongpu(0,0,256,196,2304);
    time_ongpu(0,0,128,4096,12544);
    time_ongpu(0,0,128,4096,4096);
    */
    time_ongpu(0,0,64,75,12544);
    time_ongpu(0,0,64,75,12544);
    time_ongpu(0,0,64,75,12544);
    time_ongpu(0,0,64,576,12544);
    time_ongpu(0,0,256,2304,784);
    time_ongpu(1,1,2304,256,784);
    time_ongpu(0,0,512,4608,196);
    time_ongpu(1,1,4608,512,196);

    return 0;
}
#endif



void init_cpu() {
    is_avx();
    is_fma_avx2();
}