#include "blas.h"
#include "utils.h"

#include <math.h>
#include <assert.h>
#include <float.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
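
// Reorganizes data between a (out_w x out_h x out_c) layout and a
// (out_w*stride x out_h*stride x out_c/(stride*stride)) layout
// (space-to-depth style shuffle). The copy direction is selected by `forward`.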
void reorg_cpu(float *x, int out_w, int out_h, int out_c, int batch, int stride, int forward, float *out)
{
    int b,i,j,k;
    int in_c = out_c/(stride*stride);

    //printf("\n out_c = %d, out_w = %d, out_h = %d, stride = %d, forward = %d \n", out_c, out_w, out_h, stride, forward);
    //printf(" in_c = %d, in_w = %d, in_h = %d \n", in_c, out_w*stride, out_h*stride);

    for(b = 0; b < batch; ++b){
        for(k = 0; k < out_c; ++k){
            for(j = 0; j < out_h; ++j){
                for(i = 0; i < out_w; ++i){
                    int in_index = i + out_w*(j + out_h*(k + out_c*b));
                    int c2 = k % in_c;
                    int offset = k / in_c;
                    int w2 = i*stride + offset % stride;
                    int h2 = j*stride + offset / stride;
                    int out_index = w2 + out_w*stride*(h2 + out_h*stride*(c2 + in_c*b));
                    if(forward) out[out_index] = x[in_index];
                    else out[in_index] = x[out_index];  // the reorg layer calls this with forward = 0 by default, so this branch is the usual one
                }
            }
        }
    }
}

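// Converts x in place between channel-major (batch, layers, size) layout and
// interleaved (batch, size, layers) layout, using a temporary buffer.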
void flatten(float *x, int size, int layers, int batch, int forward)
{
    float* swap = (float*)xcalloc(size * layers * batch, sizeof(float));
    int i,c,b;
    for(b = 0; b < batch; ++b){
        for(c = 0; c < layers; ++c){
            for(i = 0; i < size; ++i){
                int i1 = b*layers*size + c*size + i;
                int i2 = b*layers*size + i*layers + c;
                if (forward) swap[i2] = x[i1];
                else swap[i1] = x[i2];
            }
        }
    }
    memcpy(x, swap, size*layers*batch*sizeof(float));
    free(swap);
}

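// Elementwise weighted sum c[i] = s[i]*a[i] + (1 - s[i])*b[i] (b may be NULL,
// in which case it is treated as zero), followed by its backward pass, which
// accumulates gradients into da, db and ds from the output gradient dc.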
void weighted_sum_cpu(float *a, float *b, float *s, int n, float *c)
{
    int i;
    for(i = 0; i < n; ++i){
        c[i] = s[i]*a[i] + (1-s[i])*(b ? b[i] : 0);
    }
}

void weighted_delta_cpu(float *a, float *b, float *s, float *da, float *db, float *ds, int n, float *dc)
{
    int i;
    for(i = 0; i < n; ++i){
        if(da) da[i] += dc[i] * s[i];
        if(db) db[i] += dc[i] * (1-s[i]);
        ds[i] += dc[i] * (a[i] - b[i]);
    }
}

static float relu(float src) {
    if (src > 0) return src;
    return 0;
}

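// Forward pass of a weighted multi-input shortcut: out = w_0*in + sum_i(w_(i+1) * layers_output[i]),
// where the per-layer weights (if any) can be normalized with ReLU or softmax
// (weights_normalization). With no weights it reduces to a plain residual sum.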
void shortcut_multilayer_cpu(int size, int src_outputs, int batch, int n, int *outputs_of_layers, float **layers_output, float *out, float *in, float *weights, int nweights, WEIGHTS_NORMALIZATION_T weights_normalization)
{
    // nweights - l.n or l.n*l.c or (l.n*l.c*l.h*l.w)
    const int layer_step = nweights / (n + 1);    // 1 or l.c or (l.c * l.h * l.w)
    int step = 0;
    if (nweights > 0) step = src_outputs / layer_step;    // (l.c * l.h * l.w) or (l.w*l.h) or 1

    int id;
    #pragma omp parallel for
    for (id = 0; id < size; ++id) {

        int src_id = id;
        const int src_i = src_id % src_outputs;
        src_id /= src_outputs;
        int src_b = src_id;

        float sum = 1, max_val = -FLT_MAX;
        int i;
        if (weights && weights_normalization) {
            if (weights_normalization == SOFTMAX_NORMALIZATION) {
                for (i = 0; i < (n + 1); ++i) {
                    const int weights_index = src_i / step + i*layer_step;  // [0 or c or (c, h ,w)]
                    float w = weights[weights_index];
                    if (max_val < w) max_val = w;
                }
            }
            const float eps = 0.0001;
            sum = eps;
            for (i = 0; i < (n + 1); ++i) {
                const int weights_index = src_i / step + i*layer_step;  // [0 or c or (c, h ,w)]
                const float w = weights[weights_index];
                if (weights_normalization == RELU_NORMALIZATION) sum += relu(w);
                else if (weights_normalization == SOFTMAX_NORMALIZATION) sum += expf(w - max_val);
            }
        }

        if (weights) {
            float w = weights[src_i / step];
            if (weights_normalization == RELU_NORMALIZATION) w = relu(w) / sum;
            else if (weights_normalization == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum;

            out[id] = in[id] * w; // [0 or c or (c, h ,w)]
        }
        else out[id] = in[id];

        // layers
        for (i = 0; i < n; ++i) {
            int add_outputs = outputs_of_layers[i];
            if (src_i < add_outputs) {
                int add_index = add_outputs*src_b + src_i;
                int out_index = id;

                float *add = layers_output[i];

                if (weights) {
                    const int weights_index = src_i / step + (i + 1)*layer_step;  // [0 or c or (c, h ,w)]
                    float w = weights[weights_index];
                    if (weights_normalization == RELU_NORMALIZATION) w = relu(w) / sum;
                    else if (weights_normalization == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum;

                    out[out_index] += add[add_index] * w; // [0 or c or (c, h ,w)]
                }
                else out[out_index] += add[add_index];
            }
        }
    }
}

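// Backward pass of the weighted multi-input shortcut above: propagates delta_in
// into delta_out and into each contributing layer's delta, and accumulates
// weight gradients into weight_updates when per-layer weights are used.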
void backward_shortcut_multilayer_cpu(int size, int src_outputs, int batch, int n, int *outputs_of_layers,
    float **layers_delta, float *delta_out, float *delta_in, float *weights, float *weight_updates, int nweights, float *in, float **layers_output, WEIGHTS_NORMALIZATION_T weights_normalization)
{
    // nweights - l.n or l.n*l.c or (l.n*l.c*l.h*l.w)
    const int layer_step = nweights / (n + 1);    // 1 or l.c or (l.c * l.h * l.w)
    int step = 0;
    if (nweights > 0) step = src_outputs / layer_step;    // (l.c * l.h * l.w) or (l.w*l.h) or 1

    int id;
    #pragma omp parallel for
    for (id = 0; id < size; ++id) {
        int src_id = id;
        int src_i = src_id % src_outputs;
        src_id /= src_outputs;
        int src_b = src_id;

        float grad = 1, sum = 1, max_val = -FLT_MAX;
        int i;
        if (weights && weights_normalization) {
            if (weights_normalization == SOFTMAX_NORMALIZATION) {
                for (i = 0; i < (n + 1); ++i) {
                    const int weights_index = src_i / step + i*layer_step;  // [0 or c or (c, h ,w)]
                    float w = weights[weights_index];
                    if (max_val < w) max_val = w;
                }
            }
            const float eps = 0.0001;
            sum = eps;
            for (i = 0; i < (n + 1); ++i) {
                const int weights_index = src_i / step + i*layer_step;  // [0 or c or (c, h ,w)]
                const float w = weights[weights_index];
                if (weights_normalization == RELU_NORMALIZATION) sum += relu(w);
                else if (weights_normalization == SOFTMAX_NORMALIZATION) sum += expf(w - max_val);
            }

            /*
            grad = 0;
            for (i = 0; i < (n + 1); ++i) {
                const int weights_index = src_i / step + i*layer_step;  // [0 or c or (c, h ,w)]
                const float delta_w = delta_in[id] * in[id];
                const float w = weights[weights_index];
                if (weights_normalization == RELU_NORMALIZATION) grad += delta_w * relu(w) / sum;
                else if (weights_normalization == SOFTMAX_NORMALIZATION) grad += delta_w * expf(w - max_val) / sum;
            }
            */
        }

        if (weights) {
            float w = weights[src_i / step];
            if (weights_normalization == RELU_NORMALIZATION) w = relu(w) / sum;
            else if (weights_normalization == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum;

            delta_out[id] += delta_in[id] * w; // [0 or c or (c, h ,w)]
            weight_updates[src_i / step] += delta_in[id] * in[id] * grad;
        }
        else delta_out[id] += delta_in[id];

        // layers
        for (i = 0; i < n; ++i) {
            int add_outputs = outputs_of_layers[i];
            if (src_i < add_outputs) {
                int add_index = add_outputs*src_b + src_i;
                int out_index = id;

                float *layer_delta = layers_delta[i];
                if (weights) {
                    float *add = layers_output[i];

                    const int weights_index = src_i / step + (i + 1)*layer_step;  // [0 or c or (c, h ,w)]
                    float w = weights[weights_index];
                    if (weights_normalization == RELU_NORMALIZATION) w = relu(w) / sum;
                    else if (weights_normalization == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum;

                    layer_delta[add_index] += delta_in[id] * w; // [0 or c or (c, h ,w)]
                    weight_updates[weights_index] += delta_in[id] * add[add_index] * grad;
                }
                else layer_delta[add_index] += delta_in[id];
            }
        }
    }
}

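// Plain residual shortcut: adds the (w1 x h1 x c1) tensor `add` into the
// (w2 x h2 x c2) tensor `out` over their overlapping region, striding or
// sub-sampling whichever tensor is larger so that spatial positions line up.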
void shortcut_cpu(int batch, int w1, int h1, int c1, float *add, int w2, int h2, int c2, float *out)
{
    int stride = w1/w2;
    int sample = w2/w1;
    assert(stride == h1/h2);
    assert(sample == h2/h1);
    if(stride < 1) stride = 1;
    if(sample < 1) sample = 1;
    int minw = (w1 < w2) ? w1 : w2;
    int minh = (h1 < h2) ? h1 : h2;
    int minc = (c1 < c2) ? c1 : c2;

    int i,j,k,b;
    for(b = 0; b < batch; ++b){
        for(k = 0; k < minc; ++k){
            for(j = 0; j < minh; ++j){
                for(i = 0; i < minw; ++i){
                    int out_index = i*sample + w2*(j*sample + h2*(k + c2*b));
                    int add_index = i*stride + w1*(j*stride + h1*(k + c1*b));
                    out[out_index] += add[add_index];
                }
            }
        }
    }
}

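// Per-filter batch-norm statistics: mean_cpu and variance_cpu reduce over the
// batch and spatial dimensions for each filter, and normalize_cpu applies
// x = (x - mean) / sqrt(variance + eps) in place.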
void mean_cpu(float *x, int batch, int filters, int spatial, float *mean)
{
    float scale = 1./(batch * spatial);
    int i,j,k;
    for(i = 0; i < filters; ++i){
        mean[i] = 0;
        for(j = 0; j < batch; ++j){
            for(k = 0; k < spatial; ++k){
                int index = j*filters*spatial + i*spatial + k;
                mean[i] += x[index];
            }
        }
        mean[i] *= scale;
    }
}

void variance_cpu(float *x, float *mean, int batch, int filters, int spatial, float *variance)
{
    float scale = 1./(batch * spatial - 1);
    int i,j,k;
    for(i = 0; i < filters; ++i){
        variance[i] = 0;
        for(j = 0; j < batch; ++j){
            for(k = 0; k < spatial; ++k){
                int index = j*filters*spatial + i*spatial + k;
                variance[i] += pow((x[index] - mean[i]), 2);
            }
        }
        variance[i] *= scale;
    }
}

void normalize_cpu(float *x, float *mean, float *variance, int batch, int filters, int spatial)
{
    int b, f, i;
    for(b = 0; b < batch; ++b){
        for(f = 0; f < filters; ++f){
            for(i = 0; i < spatial; ++i){
                int index = b*filters*spatial + f*spatial + i;
                x[index] = (x[index] - mean[f])/(sqrt(variance[f] + .000001f));
            }
        }
    }
}

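// Strided, BLAS-style elementwise helpers: const/fill set elements, mul
// multiplies, pow raises to a power, axpy computes Y += ALPHA*X, scal scales,
// and scal_add computes X = X*ALPHA + BETA.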
void const_cpu(int N, float ALPHA, float *X, int INCX)
{
    int i;
    for(i = 0; i < N; ++i) X[i*INCX] = ALPHA;
}

void mul_cpu(int N, float *X, int INCX, float *Y, int INCY)
{
    int i;
    for(i = 0; i < N; ++i) Y[i*INCY] *= X[i*INCX];
}

void pow_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY)
{
    int i;
    for(i = 0; i < N; ++i) Y[i*INCY] = pow(X[i*INCX], ALPHA);
}

void axpy_cpu(int N, float ALPHA, float *X, int INCX, float *Y, int INCY)
{
    int i;
    for(i = 0; i < N; ++i) Y[i*INCY] += ALPHA*X[i*INCX];
}

void scal_cpu(int N, float ALPHA, float *X, int INCX)
{
    int i;
    for(i = 0; i < N; ++i) X[i*INCX] *= ALPHA;
}

void scal_add_cpu(int N, float ALPHA, float BETA, float *X, int INCX)
{
    int i;
    for (i = 0; i < N; ++i) X[i*INCX] = X[i*INCX] * ALPHA + BETA;
}

void fill_cpu(int N, float ALPHA, float *X, int INCX)
{
    int i;
    if (INCX == 1 && ALPHA == 0) {
        memset(X, 0, N * sizeof(float));
    }
    else {
        for (i = 0; i < N; ++i) X[i*INCX] = ALPHA;
    }
}

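// inter_cpu packs X and Y batch-by-batch into OUT as [X-block, Y-block] pairs;
// deinter_cpu accumulates such a packed buffer back into X and Y. copy_cpu and
// mult_add_into_cpu are strided copy and fused multiply-accumulate helpers.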
void deinter_cpu(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
    int i, j;
    int index = 0;
    for(j = 0; j < B; ++j) {
        for(i = 0; i < NX; ++i){
            if(X) X[j*NX + i] += OUT[index];
            ++index;
        }
        for(i = 0; i < NY; ++i){
            if(Y) Y[j*NY + i] += OUT[index];
            ++index;
        }
    }
}

void inter_cpu(int NX, float *X, int NY, float *Y, int B, float *OUT)
{
    int i, j;
    int index = 0;
    for(j = 0; j < B; ++j) {
        for(i = 0; i < NX; ++i){
            OUT[index++] = X[j*NX + i];
        }
        for(i = 0; i < NY; ++i){
            OUT[index++] = Y[j*NY + i];
        }
    }
}

void copy_cpu(int N, float *X, int INCX, float *Y, int INCY)
{
    int i;
    for(i = 0; i < N; ++i) Y[i*INCY] = X[i*INCX];
}

void mult_add_into_cpu(int N, float *X, float *Y, float *Z)
{
    int i;
    for(i = 0; i < N; ++i) Z[i] += X[i]*Y[i];
}

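// Loss helpers: each routine writes the per-element error and the per-element
// delta used by the backward pass (note the sign convention delta = truth - pred).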
void smooth_l1_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
    int i;
    for(i = 0; i < n; ++i){
        float diff = truth[i] - pred[i];
        float abs_val = fabs(diff);
        if(abs_val < 1) {
            error[i] = diff * diff;
            delta[i] = diff;
        }
        else {
            error[i] = 2*abs_val - 1;
            delta[i] = (diff > 0) ? 1 : -1;
        }
    }
}

void l1_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
    int i;
    for(i = 0; i < n; ++i){
        float diff = truth[i] - pred[i];
        error[i] = fabs(diff);
        delta[i] = diff > 0 ? 1 : -1;
    }
}

void softmax_x_ent_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
    int i;
    for(i = 0; i < n; ++i){
        float t = truth[i];
        float p = pred[i];
        error[i] = (t) ? -log(p) : 0;
        delta[i] = t-p;
    }
}

void logistic_x_ent_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
    int i;
    for(i = 0; i < n; ++i){
        float t = truth[i];
        float p = pred[i];
        error[i] = -t*log(p) - (1-t)*log(1-p);
        delta[i] = t-p;
    }
}

void l2_cpu(int n, float *pred, float *truth, float *delta, float *error)
{
    int i;
    for(i = 0; i < n; ++i){
        float diff = truth[i] - pred[i];
        error[i] = diff * diff;
        delta[i] = diff;
    }
}

float dot_cpu(int N, float *X, int INCX, float *Y, int INCY)
{
    int i;
    float dot = 0;
    for(i = 0; i < N; ++i) dot += X[i*INCX] * Y[i*INCY];
    return dot;
}

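// Numerically stable softmax with temperature over a strided vector: the
// largest element is subtracted before exponentiating so exp() cannot overflow.
// softmax_cpu below applies it independently to each group in each batch item.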
void softmax(float *input, int n, float temp, float *output, int stride)
{
    int i;
    float sum = 0;
    float largest = -FLT_MAX;
    for(i = 0; i < n; ++i){
        if(input[i*stride] > largest) largest = input[i*stride];
    }
    for(i = 0; i < n; ++i){
        float e = exp(input[i*stride]/temp - largest/temp);
        sum += e;
        output[i*stride] = e;
    }
    for(i = 0; i < n; ++i){
        output[i*stride] /= sum;
    }
}


void softmax_cpu(float *input, int n, int batch, int batch_offset, int groups, int group_offset, int stride, float temp, float *output)
{
    int g, b;
    for(b = 0; b < batch; ++b){
        for(g = 0; g < groups; ++g){
            softmax(input + b*batch_offset + g*group_offset, n, temp, output + b*batch_offset + g*group_offset, stride);
        }
    }
}

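// Nearest-neighbor upsampling by an integer stride. In the forward direction
// each input value is replicated (and scaled) into a stride x stride block of
// the output; in the backward direction output gradients are summed back into
// the input buffer.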
void upsample_cpu(float *in, int w, int h, int c, int batch, int stride, int forward, float scale, float *out)
{
    int i, j, k, b;
    for (b = 0; b < batch; ++b) {
        for (k = 0; k < c; ++k) {
            for (j = 0; j < h*stride; ++j) {
                for (i = 0; i < w*stride; ++i) {
                    int in_index = b*w*h*c + k*w*h + (j / stride)*w + i / stride;
                    int out_index = b*w*h*c*stride*stride + k*w*h*stride*stride + j*w*stride + i;
                    if (forward) out[out_index] = scale*in[in_index];
                    else in[in_index] += scale*out[out_index];
                }
            }
        }
    }
}

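// Clamps every element of X to the range [-ALPHA, ALPHA].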
void constrain_cpu(int size, float ALPHA, float *X)
{
    int i;
    for (i = 0; i < size; ++i) {
        X[i] = fminf(ALPHA, fmaxf(-ALPHA, X[i]));
    }
}

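// Replaces NaN and Inf entries with deterministic, index-dependent finite
// values so a single bad activation does not poison the rest of the buffer.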
void fix_nan_and_inf_cpu(float *input, size_t size)
{
    size_t i;
    for (i = 0; i < size; ++i) {
        float val = input[i];
        if (isnan(val) || isinf(val))
            input[i] = 1.0f / (i + 1); // pseudo random value; i + 1 avoids dividing by zero (and writing a fresh inf) at i == 0
    }
}