1 #ifndef GEMM_H
2 #define GEMM_H
3 #include "activations.h"
4 #include <stdint.h>
5 #include <stddef.h>
6 #ifdef __cplusplus
7 extern "C" {
8 #endif
9
10 void convolution_2d(int w, int h, int ksize, int n, int c, int pad, int stride,
11 float *weights, float *input, float *output, float *mean);
12
/* Set the bit at position `index` in the packed bit array `dst`.
 * Bits are numbered LSB-first within each byte: bit k of the array lives
 * in byte k/8 at shift k%8. Caller must ensure `dst` has at least
 * (index/8 + 1) bytes. */
static inline void set_bit(unsigned char *const dst, size_t index) {
    const size_t byte_i = index >> 3;          /* index / 8 */
    const unsigned shift = (unsigned)(index & 7u); /* index % 8 */
    dst[byte_i] |= (unsigned char)(1u << shift);
}
19
/* Return 1 if the bit at position `index` in the packed bit array `src`
 * is set, 0 otherwise. Bit numbering matches set_bit: LSB-first within
 * each byte (byte index/8, shift index%8). */
static inline unsigned char get_bit(unsigned char const *const src, size_t index) {
    const size_t byte_i = index >> 3;              /* index / 8 */
    const unsigned mask = 1u << (index & 7u);      /* index % 8 */
    return (unsigned char)((src[byte_i] & mask) != 0);
}
27
28 int is_avx();
29 int is_fma_avx2();
30
31 void float_to_bit(float *src, unsigned char *dst, size_t size);
32
33 void transpose_block_SSE4x4(float *A, float *B, const int n, const int m,
34 const int lda, const int ldb, const int block_size);
35
36 void transpose_bin(uint32_t *A, uint32_t *B, const int n, const int m,
37 const int lda, const int ldb, const int block_size);
38
39 void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
40 unsigned char *A, int lda,
41 unsigned char *B, int ldb,
42 float *C, int ldc, float *mean_arr);
43
44 void im2col_cpu_custom(float* data_im,
45 int channels, int height, int width,
46 int ksize, int stride, int pad, float* data_col);
47
48 void im2col_cpu_custom_align(float* data_im,
49 int channels, int height, int width,
50 int ksize, int stride, int pad, float* data_col, int bit_align);
51
52 void im2col_cpu_custom_bin(float* data_im,
53 int channels, int height, int width,
54 int ksize, int stride, int pad, float* data_col, int bit_align);
55
56 void im2col_cpu_custom_transpose(float* data_im,
57 int channels, int height, int width,
58 int ksize, int stride, int pad, float* data_col, int ldb_align);
59
60 void activate_array_cpu_custom(float *x, const int n, const ACTIVATION a);
61
62 void transpose_32x32_bits_reversed_diagonale(uint32_t *A, uint32_t *B, int m, int n);
63
64 void gemm_bin(int M, int N, int K, float ALPHA,
65 char *A, int lda,
66 float *B, int ldb,
67 float *C, int ldc);
68
69 void repack_input(float *input, float *re_packed_input, int w, int h, int c);
70
71 void convolution_repacked(uint32_t *packed_input, uint32_t *packed_weights, float *output,
72 int w, int h, int c, int n, int size, int pad, int new_lda, float *mean_arr);
73
74 void gemm_nn_bin_32bit_packed(int M, int N, int K, float ALPHA,
75 uint32_t *A, int lda,
76 uint32_t *B, int ldb,
77 float *C, int ldc, float *mean_arr);
78
79 void transpose_uint32(uint32_t *src, uint32_t *dst, int src_h, int src_w, int src_align, int dst_align);
80
81 void gemm_nn_bin_transposed_32bit_packed(int M, int N, int K, float ALPHA,
82 uint32_t *A, int lda,
83 uint32_t *B, int ldb,
84 float *C, int ldc, float *mean_arr);
85
86
87 void forward_maxpool_layer_avx(float *src, float *dst, int *indexes, int size, int w, int h, int out_w, int out_h, int c,
88 int pad, int stride, int batch);
89
90
91 void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
92 float *A, int lda,
93 float *B, int ldb,
94 float BETA,
95 float *C, int ldc);
96
97 void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
98 float *A, int lda,
99 float *B, int ldb,
100 float BETA,
101 float *C, int ldc);
102
103 #ifdef GPU
104 void gemm_ongpu(int TA, int TB, int M, int N, int K, float ALPHA,
105 float *A_gpu, int lda,
106 float *B_gpu, int ldb,
107 float BETA,
108 float *C_gpu, int ldc);
109
110 void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA,
111 float *A, int lda,
112 float *B, int ldb,
113 float BETA,
114 float *C, int ldc);
115 #endif
116 #ifdef __cplusplus
117 }
118 #endif
119 #endif
120