#include "darknet.h"

#include <stdio.h>
#include <time.h>
#include <assert.h>

#include "network.h"
#include "image.h"
#include "data.h"
#include "utils.h"
#include "blas.h"

#include "crop_layer.h"
#include "connected_layer.h"
#include "gru_layer.h"
#include "rnn_layer.h"
#include "crnn_layer.h"
#include "conv_lstm_layer.h"
#include "local_layer.h"
#include "convolutional_layer.h"
#include "activation_layer.h"
#include "detection_layer.h"
#include "region_layer.h"
#include "normalization_layer.h"
#include "batchnorm_layer.h"
#include "maxpool_layer.h"
#include "reorg_layer.h"
#include "reorg_old_layer.h"
#include "avgpool_layer.h"
#include "cost_layer.h"
#include "softmax_layer.h"
#include "dropout_layer.h"
#include "route_layer.h"
#include "shortcut_layer.h"
#include "scale_channels_layer.h"
#include "sam_layer.h"
#include "yolo_layer.h"
#include "gaussian_yolo_layer.h"
#include "upsample_layer.h"
#include "parser.h"

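// Build default data-loading arguments (input size plus augmentation
// parameters such as crop, angle, exposure, saturation and hue) from the
// network configuration.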
load_args get_base_args(network *net)
{
    load_args args = { 0 };
    args.w = net->w;
    args.h = net->h;
    args.size = net->w;

    args.min = net->min_crop;
    args.max = net->max_crop;
    args.angle = net->angle;
    args.aspect = net->aspect;
    args.exposure = net->exposure;
    args.center = net->center;
    args.saturation = net->saturation;
    args.hue = net->hue;
    return args;
}

int64_t get_current_iteration(network net)
{
    return *net.cur_iteration;
}

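// Number of optimizer iterations completed so far; one iteration consumes
// batch*subdivisions images.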
int get_current_batch(network net)
{
    int batch_num = (*net.seen)/(net.batch*net.subdivisions);
    return batch_num;
}

/*
void reset_momentum(network net)
{
    if (net.momentum == 0) return;
    net.learning_rate = 0;
    net.momentum = 0;
    net.decay = 0;
    #ifdef GPU
        //if(net.gpu_index >= 0) update_network_gpu(net);
    #endif
}
*/

void reset_network_state(network *net, int b)
{
    int i;
    for (i = 0; i < net->n; ++i) {
#ifdef GPU
        layer l = net->layers[i];
        if (l.state_gpu) {
            fill_ongpu(l.outputs, 0, l.state_gpu + l.outputs*b, 1);
        }
        if (l.h_gpu) {
            fill_ongpu(l.outputs, 0, l.h_gpu + l.outputs*b, 1);
        }
#endif
    }
}

void reset_rnn(network *net)
{
    reset_network_state(net, 0);
}

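// Current number of sequential subdivisions: starts at
// init_sequential_subdivisions, is scaled by seq_scales[] at the configured
// steps, and is clamped to the range [1, net.subdivisions].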
float get_current_seq_subdivisions(network net)
{
    int sequence_subdivisions = net.init_sequential_subdivisions;

    if (net.num_steps > 0)
    {
        int batch_num = get_current_batch(net);
        int i;
        for (i = 0; i < net.num_steps; ++i) {
            if (net.steps[i] > batch_num) break;
            sequence_subdivisions *= net.seq_scales[i];
        }
    }
    if (sequence_subdivisions < 1) sequence_subdivisions = 1;
    if (sequence_subdivisions > net.subdivisions) sequence_subdivisions = net.subdivisions;
    return sequence_subdivisions;
}

int get_sequence_value(network net)
{
    int sequence = 1;
    if (net.sequential_subdivisions != 0) sequence = net.subdivisions / net.sequential_subdivisions;
    if (sequence < 1) sequence = 1;
    return sequence;
}

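// Learning rate for the current iteration according to the configured policy
// (CONSTANT, STEP, STEPS, EXP, POLY, RANDOM, SIG or SGDR), after the burn-in
// warmup phase.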
float get_current_rate(network net)
{
    int batch_num = get_current_batch(net);
    int i;
    float rate;
    if (batch_num < net.burn_in) return net.learning_rate * pow((float)batch_num / net.burn_in, net.power);
    switch (net.policy) {
        case CONSTANT:
            return net.learning_rate;
        case STEP:
            return net.learning_rate * pow(net.scale, batch_num/net.step);
        case STEPS:
            rate = net.learning_rate;
            for(i = 0; i < net.num_steps; ++i){
                if(net.steps[i] > batch_num) return rate;
                rate *= net.scales[i];
                //if(net.steps[i] > batch_num - 1 && net.scales[i] > 1) reset_momentum(net);
            }
            return rate;
        case EXP:
            return net.learning_rate * pow(net.gamma, batch_num);
        case POLY:
            return net.learning_rate * pow(1 - (float)batch_num / net.max_batches, net.power);
            //if (batch_num < net.burn_in) return net.learning_rate * pow((float)batch_num / net.burn_in, net.power);
            //return net.learning_rate * pow(1 - (float)batch_num / net.max_batches, net.power);
        case RANDOM:
            return net.learning_rate * pow(rand_uniform(0,1), net.power);
        case SIG:
            return net.learning_rate * (1./(1.+exp(net.gamma*(batch_num - net.step))));
        case SGDR:
        {
            int last_iteration_start = 0;
            int cycle_size = net.batches_per_cycle;
            while ((last_iteration_start + cycle_size) < batch_num)
            {
                last_iteration_start += cycle_size;
                cycle_size *= net.batches_cycle_mult;
            }
            rate = net.learning_rate_min +
                0.5*(net.learning_rate - net.learning_rate_min)
                * (1. + cos((float)(batch_num - last_iteration_start)*3.14159265 / cycle_size));

            return rate;
        }
        default:
            fprintf(stderr, "Policy is weird!\n");
            return net.learning_rate;
    }
}

char *get_layer_string(LAYER_TYPE a)
{
    switch(a){
        case CONVOLUTIONAL:
            return "convolutional";
        case ACTIVE:
            return "activation";
        case LOCAL:
            return "local";
        case DECONVOLUTIONAL:
            return "deconvolutional";
        case CONNECTED:
            return "connected";
        case RNN:
            return "rnn";
        case GRU:
            return "gru";
        case LSTM:
            return "lstm";
        case CRNN:
            return "crnn";
        case MAXPOOL:
            return "maxpool";
        case REORG:
            return "reorg";
        case AVGPOOL:
            return "avgpool";
        case SOFTMAX:
            return "softmax";
        case DETECTION:
            return "detection";
        case REGION:
            return "region";
        case YOLO:
            return "yolo";
        case GAUSSIAN_YOLO:
            return "Gaussian_yolo";
        case DROPOUT:
            return "dropout";
        case CROP:
            return "crop";
        case COST:
            return "cost";
        case ROUTE:
            return "route";
        case SHORTCUT:
            return "shortcut";
        case SCALE_CHANNELS:
            return "scale_channels";
        case SAM:
            return "sam";
        case NORMALIZATION:
            return "normalization";
        case BATCHNORM:
            return "batchnorm";
        default:
            break;
    }
    return "none";
}

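// Allocate an empty network with n layers and zero-initialized
// iteration/sample counters (plus GPU-side buffers when built with GPU).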
network make_network(int n)
{
    network net = {0};
    net.n = n;
    net.layers = (layer*)xcalloc(net.n, sizeof(layer));
    net.seen = (uint64_t*)xcalloc(1, sizeof(uint64_t));
    net.cur_iteration = (int*)xcalloc(1, sizeof(int));
#ifdef GPU
    net.input_gpu = (float**)xcalloc(1, sizeof(float*));
    net.truth_gpu = (float**)xcalloc(1, sizeof(float*));

    net.input16_gpu = (float**)xcalloc(1, sizeof(float*));
    net.output16_gpu = (float**)xcalloc(1, sizeof(float*));
    net.max_input16_size = (size_t*)xcalloc(1, sizeof(size_t));
    net.max_output16_size = (size_t*)xcalloc(1, sizeof(size_t));
#endif
    return net;
}

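// Forward pass: runs every layer in order, feeding each layer the previous
// layer's output. In training mode each layer's delta is zeroed first.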
void forward_network(network net, network_state state)
{
    state.workspace = net.workspace;
    int i;
    for(i = 0; i < net.n; ++i){
        state.index = i;
        layer l = net.layers[i];
        if(l.delta && state.train){
            scal_cpu(l.outputs * l.batch, 0, l.delta, 1);
        }
        //double time = get_time_point();
        l.forward(l, state);
        //printf("%d - Predicted in %lf milli-seconds.\n", i, ((double)get_time_point() - time) / 1000);
        state.input = l.output;

        /*
        float avg_val = 0;
        int k;
        for (k = 0; k < l.outputs; ++k) avg_val += l.output[k];
        printf(" i: %d - avg_val = %f \n", i, avg_val / l.outputs);
        */
    }
}

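// Apply the optimizer update (current learning rate, momentum, decay) to
// every layer that has trainable parameters.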
void update_network(network net)
{
    int i;
    int update_batch = net.batch*net.subdivisions;
    float rate = get_current_rate(net);
    for(i = 0; i < net.n; ++i){
        layer l = net.layers[i];
        if(l.update){
            l.update(l, update_batch, rate, net.momentum, net.decay);
        }
    }
}

float *get_network_output(network net)
{
#ifdef GPU
    if (gpu_index >= 0) return get_network_output_gpu(net);
#endif
    int i;
    for(i = net.n-1; i > 0; --i) if(net.layers[i].type != COST) break;
    return net.layers[i].output;
}

float get_network_cost(network net)
{
    int i;
    float sum = 0;
    int count = 0;
    for(i = 0; i < net.n; ++i){
        if(net.layers[i].cost){
            sum += net.layers[i].cost[0];
            ++count;
        }
    }
    return sum/count;
}

int get_predicted_class_network(network net)
{
    float *out = get_network_output(net);
    int k = get_network_output_size(net);
    return max_index(out, k);
}

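// Backward pass from the last layer to the first: each layer receives the
// previous layer's output as input and accumulates gradients into the
// previous layer's delta.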
void backward_network(network net, network_state state)
{
    int i;
    float *original_input = state.input;
    float *original_delta = state.delta;
    state.workspace = net.workspace;
    for(i = net.n-1; i >= 0; --i){
        state.index = i;
        if(i == 0){
            state.input = original_input;
            state.delta = original_delta;
        }else{
            layer prev = net.layers[i-1];
            state.input = prev.output;
            state.delta = prev.delta;
        }
        layer l = net.layers[i];
        if (l.stopbackward) break;
        if (l.onlyforward) continue;
        l.backward(l, state);
    }
}

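// One forward/backward pass over a single mini-batch; returns the network
// cost. Weight updates are applied separately by the caller.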
float train_network_datum(network net, float *x, float *y)
{
#ifdef GPU
    if(gpu_index >= 0) return train_network_datum_gpu(net, x, y);
#endif
    network_state state={0};
    *net.seen += net.batch;
    state.index = 0;
    state.net = net;
    state.input = x;
    state.delta = 0;
    state.truth = y;
    state.train = 1;
    forward_network(net, state);
    backward_network(net, state);
    float error = get_network_cost(net);
    //if(((*net.seen)/net.batch)%net.subdivisions == 0) update_network(net);
    return error;
}

float train_network_sgd(network net, data d, int n)
{
    int batch = net.batch;
    float* X = (float*)xcalloc(batch * d.X.cols, sizeof(float));
    float* y = (float*)xcalloc(batch * d.y.cols, sizeof(float));

    int i;
    float sum = 0;
    for(i = 0; i < n; ++i){
        get_random_batch(d, batch, X, y);
        net.current_subdivision = i;
        float err = train_network_datum(net, X, y);
        sum += err;
    }
    free(X);
    free(y);
    return (float)sum/(n*batch);
}

float train_network(network net, data d)
{
    return train_network_waitkey(net, d, 0);
}

float train_network_waitkey(network net, data d, int wait_key)
{
    assert(d.X.rows % net.batch == 0);
    int batch = net.batch;
    int n = d.X.rows / batch;
    float* X = (float*)xcalloc(batch * d.X.cols, sizeof(float));
    float* y = (float*)xcalloc(batch * d.y.cols, sizeof(float));

    int i;
    float sum = 0;
    for(i = 0; i < n; ++i){
        get_next_batch(d, batch, i*batch, X, y);
        net.current_subdivision = i;
        float err = train_network_datum(net, X, y);
        sum += err;
        if(wait_key) wait_key_cv(5);
    }
    (*net.cur_iteration) += 1;
#ifdef GPU
    update_network_gpu(net);
#else   // GPU
    update_network(net);
#endif  // GPU
    free(X);
    free(y);
    return (float)sum/(n*batch);
}


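// Train for n iterations on randomly drawn samples (two per iteration),
// updating the weights after each iteration; returns the average cost.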
float train_network_batch(network net, data d, int n)
{
    int i,j;
    network_state state={0};
    state.index = 0;
    state.net = net;
    state.train = 1;
    state.delta = 0;
    float sum = 0;
    int batch = 2;
    for(i = 0; i < n; ++i){
        for(j = 0; j < batch; ++j){
            int index = random_gen()%d.X.rows;
            state.input = d.X.vals[index];
            state.truth = d.y.vals[index];
            forward_network(net, state);
            backward_network(net, state);
            sum += get_network_cost(net);
        }
        update_network(net);
    }
    return (float)sum/(n*batch);
}

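// Recompute the largest per-layer workspace requirement and reallocate the
// shared workspace buffer to match.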
int recalculate_workspace_size(network *net)
{
#ifdef GPU
    cuda_set_device(net->gpu_index);
    if (gpu_index >= 0) cuda_free(net->workspace);
#endif
    int i;
    size_t workspace_size = 0;
    for (i = 0; i < net->n; ++i) {
        layer l = net->layers[i];
        //printf(" %d: layer = %d,", i, l.type);
        if (l.type == CONVOLUTIONAL) {
            l.workspace_size = get_convolutional_workspace_size(l);
        }
        else if (l.type == CONNECTED) {
            l.workspace_size = get_connected_workspace_size(l);
        }
        if (l.workspace_size > workspace_size) workspace_size = l.workspace_size;
        net->layers[i] = l;
    }

#ifdef GPU
    if (gpu_index >= 0) {
        printf("\n try to allocate additional workspace_size = %1.2f MB \n", (float)workspace_size / 1000000);
        net->workspace = cuda_make_array(0, workspace_size / sizeof(float) + 1);
        printf(" CUDA allocate done! \n");
    }
    else {
        free(net->workspace);
        net->workspace = (float*)xcalloc(1, workspace_size);
    }
#else
    free(net->workspace);
    net->workspace = (float*)xcalloc(1, workspace_size);
#endif
    //fprintf(stderr, " Done!\n");
    return 0;
}

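// Change the batch size of every layer (rebuilding cuDNN descriptors for
// convolutional and maxpool layers) and resize the shared workspace.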
void set_batch_network(network *net, int b)
{
    net->batch = b;
    int i;
    for(i = 0; i < net->n; ++i){
        net->layers[i].batch = b;

#ifdef CUDNN
        if(net->layers[i].type == CONVOLUTIONAL){
            cudnn_convolutional_setup(net->layers + i, cudnn_fastest, 0);
        }
        else if (net->layers[i].type == MAXPOOL) {
            cudnn_maxpool_setup(net->layers + i);
        }
#endif

    }
    recalculate_workspace_size(net);
}

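// Resize the network to a new input width/height by resizing each layer in
// turn, then reallocate the workspace and input buffers.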
int resize_network(network *net, int w, int h)
{
#ifdef GPU
    cuda_set_device(net->gpu_index);
    if(gpu_index >= 0){
        cuda_free(net->workspace);
        if (net->input_gpu) {
            cuda_free(*net->input_gpu);
            *net->input_gpu = 0;
            cuda_free(*net->truth_gpu);
            *net->truth_gpu = 0;
        }

        if (net->input_state_gpu) cuda_free(net->input_state_gpu);
        if (net->input_pinned_cpu) {
            if (net->input_pinned_cpu_flag) cudaFreeHost(net->input_pinned_cpu);
            else free(net->input_pinned_cpu);
        }
    }
#endif
    int i;
    //if(w == net->w && h == net->h) return 0;
    net->w = w;
    net->h = h;
    int inputs = 0;
    size_t workspace_size = 0;
    //fprintf(stderr, "Resizing to %d x %d...\n", w, h);
    //fflush(stderr);
    for (i = 0; i < net->n; ++i){
        layer l = net->layers[i];
        //printf(" (resize %d: layer = %d) , ", i, l.type);
        if(l.type == CONVOLUTIONAL){
            resize_convolutional_layer(&l, w, h);
        }
        else if (l.type == CRNN) {
            resize_crnn_layer(&l, w, h);
        }else if (l.type == CONV_LSTM) {
            resize_conv_lstm_layer(&l, w, h);
        }else if(l.type == CROP){
            resize_crop_layer(&l, w, h);
        }else if(l.type == MAXPOOL){
            resize_maxpool_layer(&l, w, h);
        }else if (l.type == LOCAL_AVGPOOL) {
            resize_maxpool_layer(&l, w, h);
        }else if (l.type == BATCHNORM) {
            resize_batchnorm_layer(&l, w, h);
        }else if(l.type == REGION){
            resize_region_layer(&l, w, h);
        }else if (l.type == YOLO) {
            resize_yolo_layer(&l, w, h);
        }else if (l.type == GAUSSIAN_YOLO) {
            resize_gaussian_yolo_layer(&l, w, h);
        }else if(l.type == ROUTE){
            resize_route_layer(&l, net);
        }else if (l.type == SHORTCUT) {
            resize_shortcut_layer(&l, w, h, net);
        }else if (l.type == SCALE_CHANNELS) {
            resize_scale_channels_layer(&l, net);
        }else if (l.type == SAM) {
            resize_sam_layer(&l, w, h);
        }else if (l.type == DROPOUT) {
            resize_dropout_layer(&l, inputs);
            l.out_w = l.w = w;
            l.out_h = l.h = h;
            l.output = net->layers[i - 1].output;
            l.delta = net->layers[i - 1].delta;
#ifdef GPU
            l.output_gpu = net->layers[i-1].output_gpu;
            l.delta_gpu = net->layers[i-1].delta_gpu;
#endif
        }else if (l.type == UPSAMPLE) {
            resize_upsample_layer(&l, w, h);
        }else if(l.type == REORG){
            resize_reorg_layer(&l, w, h);
        }else if (l.type == REORG_OLD) {
            resize_reorg_old_layer(&l, w, h);
        }else if(l.type == AVGPOOL){
            resize_avgpool_layer(&l, w, h);
        }else if(l.type == NORMALIZATION){
            resize_normalization_layer(&l, w, h);
        }else if(l.type == COST){
            resize_cost_layer(&l, inputs);
        }else{
            fprintf(stderr, "Resizing type %d \n", (int)l.type);
            error("Cannot resize this type of layer");
        }
        if(l.workspace_size > workspace_size) workspace_size = l.workspace_size;
        inputs = l.outputs;
        net->layers[i] = l;
        //if(l.type != DROPOUT)
        {
            w = l.out_w;
            h = l.out_h;
        }
        //if(l.type == AVGPOOL) break;
    }
#ifdef GPU
    const int size = get_network_input_size(*net) * net->batch;
    if(gpu_index >= 0){
        printf(" try to allocate additional workspace_size = %1.2f MB \n", (float)workspace_size / 1000000);
        net->workspace = cuda_make_array(0, workspace_size/sizeof(float) + 1);
        net->input_state_gpu = cuda_make_array(0, size);
        if (cudaSuccess == cudaHostAlloc(&net->input_pinned_cpu, size * sizeof(float), cudaHostRegisterMapped))
            net->input_pinned_cpu_flag = 1;
        else {
            cudaGetLastError(); // reset CUDA-error
            net->input_pinned_cpu = (float*)xcalloc(size, sizeof(float));
            net->input_pinned_cpu_flag = 0;
        }
        printf(" CUDA allocate done! \n");
    }else {
        free(net->workspace);
        net->workspace = (float*)xcalloc(1, workspace_size);
        if(!net->input_pinned_cpu_flag)
            net->input_pinned_cpu = (float*)xrealloc(net->input_pinned_cpu, size * sizeof(float));
    }
#else
    free(net->workspace);
    net->workspace = (float*)xcalloc(1, workspace_size);
#endif
    //fprintf(stderr, " Done!\n");
    return 0;
}

int get_network_output_size(network net)
{
    int i;
    for(i = net.n-1; i > 0; --i) if(net.layers[i].type != COST) break;
    return net.layers[i].outputs;
}

int get_network_input_size(network net)
{
    return net.layers[0].inputs;
}

detection_layer get_network_detection_layer(network net)
{
    int i;
    for(i = 0; i < net.n; ++i){
        if(net.layers[i].type == DETECTION){
            return net.layers[i];
        }
    }
    fprintf(stderr, "Detection layer not found!!\n");
    detection_layer l = { (LAYER_TYPE)0 };
    return l;
}

image get_network_image_layer(network net, int i)
{
    layer l = net.layers[i];
    if (l.out_w && l.out_h && l.out_c){
        return float_to_image(l.out_w, l.out_h, l.out_c, l.output);
    }
    image def = {0};
    return def;
}

layer* get_network_layer(network* net, int i)
{
    return net->layers + i;
}

image get_network_image(network net)
{
    int i;
    for(i = net.n-1; i >= 0; --i){
        image m = get_network_image_layer(net, i);
        if(m.h != 0) return m;
    }
    image def = {0};
    return def;
}

void visualize_network(network net)
{
    image *prev = 0;
    int i;
    char buff[256];
    for(i = 0; i < net.n; ++i){
        sprintf(buff, "Layer %d", i);
        layer l = net.layers[i];
        if(l.type == CONVOLUTIONAL){
            prev = visualize_convolutional_layer(l, buff, prev);
        }
    }
}

void top_predictions(network net, int k, int *index)
{
    int size = get_network_output_size(net);
    float *out = get_network_output(net);
    top_k(out, size, k, index);
}

// A version of network_predict that uses a pointer for the network
// struct to make the python binding work properly.
float *network_predict_ptr(network *net, float *input)
{
    return network_predict(*net, input);
}

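// Forward pass in inference mode; returns a pointer to the output of the
// last non-COST layer.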
float *network_predict(network net, float *input)
{
#ifdef GPU
    if(gpu_index >= 0) return network_predict_gpu(net, input);
#endif

    network_state state = {0};
    state.net = net;
    state.index = 0;
    state.input = input;
    state.truth = 0;
    state.train = 0;
    state.delta = 0;
    forward_network(net, state);
    float *out = get_network_output(net);
    return out;
}

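// Count detections above thresh across all YOLO, Gaussian-YOLO, region and
// detection layers.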
int num_detections(network *net, float thresh)
{
    int i;
    int s = 0;
    for (i = 0; i < net->n; ++i) {
        layer l = net->layers[i];
        if (l.type == YOLO) {
            s += yolo_num_detections(l, thresh);
        }
        if (l.type == GAUSSIAN_YOLO) {
            s += gaussian_yolo_num_detections(l, thresh);
        }
        if (l.type == DETECTION || l.type == REGION) {
            s += l.w*l.h*l.n;
        }
    }
    return s;
}

int num_detections_batch(network *net, float thresh, int batch)
{
    int i;
    int s = 0;
    for (i = 0; i < net->n; ++i) {
        layer l = net->layers[i];
        if (l.type == YOLO) {
            s += yolo_num_detections_batch(l, thresh, batch);
        }
        if (l.type == DETECTION || l.type == REGION) {
            s += l.w*l.h*l.n;
        }
    }
    return s;
}

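// Allocate detection structs (with per-class probability arrays) for every
// box above thresh; writes the box count to *num when num is non-NULL.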
detection *make_network_boxes(network *net, float thresh, int *num)
{
    layer l = net->layers[net->n - 1];
    int i;
    int nboxes = num_detections(net, thresh);
    if (num) *num = nboxes;
    detection* dets = (detection*)xcalloc(nboxes, sizeof(detection));
    for (i = 0; i < nboxes; ++i) {
        dets[i].prob = (float*)xcalloc(l.classes, sizeof(float));
        // tx,ty,tw,th uncertainty
        if (l.type == GAUSSIAN_YOLO) dets[i].uc = (float*)xcalloc(4, sizeof(float)); // Gaussian_YOLOv3
        if (l.coords > 4) {
            dets[i].mask = (float*)xcalloc(l.coords - 4, sizeof(float));
        }
    }
    return dets;
}

detection *make_network_boxes_batch(network *net, float thresh, int *num, int batch)
{
    int i;
    layer l = net->layers[net->n - 1];
    int nboxes = num_detections_batch(net, thresh, batch);
    assert(num != NULL);
    *num = nboxes;
    detection* dets = (detection*)calloc(nboxes, sizeof(detection));
    for (i = 0; i < nboxes; ++i) {
        dets[i].prob = (float*)calloc(l.classes, sizeof(float));
        if (l.coords > 4) {
            dets[i].mask = (float*)calloc(l.coords - 4, sizeof(float));
        }
    }
    return dets;
}

void custom_get_region_detections(layer l, int w, int h, int net_w, int net_h, float thresh, int *map, float hier, int relative, detection *dets, int letter)
{
    box* boxes = (box*)xcalloc(l.w * l.h * l.n, sizeof(box));
    float** probs = (float**)xcalloc(l.w * l.h * l.n, sizeof(float*));
    int i, j;
    for (j = 0; j < l.w*l.h*l.n; ++j) probs[j] = (float*)xcalloc(l.classes, sizeof(float));
    get_region_boxes(l, 1, 1, thresh, probs, boxes, 0, map);
    for (j = 0; j < l.w*l.h*l.n; ++j) {
        dets[j].classes = l.classes;
        dets[j].bbox = boxes[j];
        dets[j].objectness = 1;
        for (i = 0; i < l.classes; ++i) {
            dets[j].prob[i] = probs[j][i];
        }
    }

    free(boxes);
    free_ptrs((void **)probs, l.w*l.h*l.n);

    //correct_region_boxes(dets, l.w*l.h*l.n, w, h, net_w, net_h, relative);
    correct_yolo_boxes(dets, l.w*l.h*l.n, w, h, net_w, net_h, relative, letter);
}

void fill_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, detection *dets, int letter)
{
    int prev_classes = -1;
    int j;
    for (j = 0; j < net->n; ++j) {
        layer l = net->layers[j];
        if (l.type == YOLO) {
            int count = get_yolo_detections(l, w, h, net->w, net->h, thresh, map, relative, dets, letter);
            dets += count;
            if (prev_classes < 0) prev_classes = l.classes;
            else if (prev_classes != l.classes) {
                printf(" Error: Different [yolo] layers have different number of classes = %d and %d - check your cfg-file! \n",
                    prev_classes, l.classes);
            }
        }
        if (l.type == GAUSSIAN_YOLO) {
            int count = get_gaussian_yolo_detections(l, w, h, net->w, net->h, thresh, map, relative, dets, letter);
            dets += count;
        }
        if (l.type == REGION) {
            custom_get_region_detections(l, w, h, net->w, net->h, thresh, map, hier, relative, dets, letter);
            //get_region_detections(l, w, h, net->w, net->h, thresh, map, hier, relative, dets);
            dets += l.w*l.h*l.n;
        }
        if (l.type == DETECTION) {
            get_detection_detections(l, w, h, thresh, dets);
            dets += l.w*l.h*l.n;
        }
    }
}

void fill_network_boxes_batch(network *net, int w, int h, float thresh, float hier, int *map, int relative, detection *dets, int letter, int batch)
{
    int prev_classes = -1;
    int j;
    for (j = 0; j < net->n; ++j) {
        layer l = net->layers[j];
        if (l.type == YOLO) {
            int count = get_yolo_detections_batch(l, w, h, net->w, net->h, thresh, map, relative, dets, letter, batch);
            dets += count;
            if (prev_classes < 0) prev_classes = l.classes;
            else if (prev_classes != l.classes) {
                printf(" Error: Different [yolo] layers have different number of classes = %d and %d - check your cfg-file! \n",
                    prev_classes, l.classes);
            }
        }
        if (l.type == REGION) {
            custom_get_region_detections(l, w, h, net->w, net->h, thresh, map, hier, relative, dets, letter);
            //get_region_detections(l, w, h, net->w, net->h, thresh, map, hier, relative, dets);
            dets += l.w*l.h*l.n;
        }
        if (l.type == DETECTION) {
            get_detection_detections(l, w, h, thresh, dets);
            dets += l.w*l.h*l.n;
        }
    }
}

detection *get_network_boxes(network *net, int w, int h, float thresh, float hier, int *map, int relative, int *num, int letter)
{
    detection *dets = make_network_boxes(net, thresh, num);
    fill_network_boxes(net, w, h, thresh, hier, map, relative, dets, letter);
    return dets;
}

void free_detections(detection *dets, int n)
{
    int i;
    for (i = 0; i < n; ++i) {
        free(dets[i].prob);
        if (dets[i].uc) free(dets[i].uc);
        if (dets[i].mask) free(dets[i].mask);
    }
    free(dets);
}

void free_batch_detections(det_num_pair *det_num_pairs, int n)
{
    int i;
    for (i = 0; i < n; ++i)
        free_detections(det_num_pairs[i].dets, det_num_pairs[i].num);
    free(det_num_pairs);
}

// JSON format:
//{
// "frame_id":8990,
// "objects":[
//  {"class_id":4, "name":"aeroplane", "relative_coordinates":{"center_x":0.398831, "center_y":0.630203, "width":0.057455, "height":0.020396}, "confidence":0.793070},
//  {"class_id":14, "name":"bird", "relative_coordinates":{"center_x":0.398831, "center_y":0.630203, "width":0.057455, "height":0.020396}, "confidence":0.265497}
// ]
//},

char *detection_to_json(detection *dets, int nboxes, int classes, char **names, long long int frame_id, char *filename)
{
    const float thresh = 0.005; // function get_network_boxes() has already filtered dets by actual threshold

    char *send_buf = (char *)calloc(1024, sizeof(char));
    if (!send_buf) return 0;
    if (filename) {
        sprintf(send_buf, "{\n \"frame_id\":%lld, \n \"filename\":\"%s\", \n \"objects\": [ \n", frame_id, filename);
    }
    else {
        sprintf(send_buf, "{\n \"frame_id\":%lld, \n \"objects\": [ \n", frame_id);
    }

    int i, j;
    int class_id = -1;
    for (i = 0; i < nboxes; ++i) {
        for (j = 0; j < classes; ++j) {
            int show = strncmp(names[j], "dont_show", 9);
            if (dets[i].prob[j] > thresh && show)
            {
                if (class_id != -1) strcat(send_buf, ", \n");
                class_id = j;
                char *buf = (char *)calloc(2048, sizeof(char));
                if (!buf) {
                    free(send_buf);
                    return 0;
                }
                //sprintf(buf, "{\"image_id\":%d, \"category_id\":%d, \"bbox\":[%f, %f, %f, %f], \"score\":%f}",
                //    image_id, j, dets[i].bbox.x, dets[i].bbox.y, dets[i].bbox.w, dets[i].bbox.h, dets[i].prob[j]);

                sprintf(buf, "  {\"class_id\":%d, \"name\":\"%s\", \"relative_coordinates\":{\"center_x\":%f, \"center_y\":%f, \"width\":%f, \"height\":%f}, \"confidence\":%f}",
                    j, names[j], dets[i].bbox.x, dets[i].bbox.y, dets[i].bbox.w, dets[i].bbox.h, dets[i].prob[j]);

                int send_buf_len = strlen(send_buf);
                int buf_len = strlen(buf);
                int total_len = send_buf_len + buf_len + 100;
                send_buf = (char *)realloc(send_buf, total_len * sizeof(char));
                if (!send_buf) {
                    if (buf) free(buf);
                    return 0; // exit(-1);
                }
                strcat(send_buf, buf);
                free(buf);
            }
        }
    }
    strcat(send_buf, "\n ] \n}");
    return send_buf;
}


float *network_predict_image(network *net, image im)
{
    //image imr = letterbox_image(im, net->w, net->h);
    float *p;
    if(net->batch != 1) set_batch_network(net, 1);
    if (im.w == net->w && im.h == net->h) {
        // Input image is the same size as our net, predict on that image
        p = network_predict(*net, im.data);
    }
    else {
        // Need to resize image to the desired size for the net
        image imr = resize_image(im, net->w, net->h);
        p = network_predict(*net, imr.data);
        free_image(imr);
    }
    return p;
}

det_num_pair* network_predict_batch(network *net, image im, int batch_size, int w, int h, float thresh, float hier, int *map, int relative, int letter)
{
    network_predict(*net, im.data);
    det_num_pair *pdets = (struct det_num_pair *)calloc(batch_size, sizeof(det_num_pair));
    int num;
    int batch;
    for(batch=0; batch < batch_size; batch++){
        detection *dets = make_network_boxes_batch(net, thresh, &num, batch);
        fill_network_boxes_batch(net, w, h, thresh, hier, map, relative, dets, letter, batch);
        pdets[batch].num = num;
        pdets[batch].dets = dets;
    }
    return pdets;
}

float *network_predict_image_letterbox(network *net, image im)
{
    //image imr = letterbox_image(im, net->w, net->h);
    float *p;
    if (net->batch != 1) set_batch_network(net, 1);
    if (im.w == net->w && im.h == net->h) {
        // Input image is the same size as our net, predict on that image
        p = network_predict(*net, im.data);
    }
    else {
        // Need to resize image to the desired size for the net
        image imr = letterbox_image(im, net->w, net->h);
        p = network_predict(*net, imr.data);
        free_image(imr);
    }
    return p;
}

int network_width(network *net) { return net->w; }
int network_height(network *net) { return net->h; }

matrix network_predict_data_multi(network net, data test, int n)
{
    int i,j,b,m;
    int k = get_network_output_size(net);
    matrix pred = make_matrix(test.X.rows, k);
    // X holds one batch of inputs: net.batch rows of test.X.cols features each
    float* X = (float*)xcalloc(net.batch * test.X.cols, sizeof(float));
    for(i = 0; i < test.X.rows; i += net.batch){
        for(b = 0; b < net.batch; ++b){
            if(i+b == test.X.rows) break;
            memcpy(X+b*test.X.cols, test.X.vals[i+b], test.X.cols*sizeof(float));
        }
        for(m = 0; m < n; ++m){
            float *out = network_predict(net, X);
            for(b = 0; b < net.batch; ++b){
                if(i+b == test.X.rows) break;
                for(j = 0; j < k; ++j){
                    pred.vals[i+b][j] += out[j+b*k]/n;
                }
            }
        }
    }
    free(X);
    return pred;
}

matrix network_predict_data(network net, data test)
{
    int i,j,b;
    int k = get_network_output_size(net);
    matrix pred = make_matrix(test.X.rows, k);
    float* X = (float*)xcalloc(net.batch * test.X.cols, sizeof(float));
    for(i = 0; i < test.X.rows; i += net.batch){
        for(b = 0; b < net.batch; ++b){
            if(i+b == test.X.rows) break;
            memcpy(X+b*test.X.cols, test.X.vals[i+b], test.X.cols*sizeof(float));
        }
        float *out = network_predict(net, X);
        for(b = 0; b < net.batch; ++b){
            if(i+b == test.X.rows) break;
            for(j = 0; j < k; ++j){
                pred.vals[i+b][j] = out[j+b*k];
            }
        }
    }
    free(X);
    return pred;
}

void print_network(network net)
{
    int i,j;
    for(i = 0; i < net.n; ++i){
        layer l = net.layers[i];
        float *output = l.output;
        int n = l.outputs;
        float mean = mean_array(output, n);
        float vari = variance_array(output, n);
        fprintf(stderr, "Layer %d - Mean: %f, Variance: %f\n", i, mean, vari);
        if(n > 100) n = 100;
        for(j = 0; j < n; ++j) fprintf(stderr, "%f, ", output[j]);
        if(n == 100) fprintf(stderr, ".....\n");
        fprintf(stderr, "\n");
    }
}

void compare_networks(network n1, network n2, data test)
{
    matrix g1 = network_predict_data(n1, test);
    matrix g2 = network_predict_data(n2, test);
    int i;
    int a,b,c,d;
    a = b = c = d = 0;
    for(i = 0; i < g1.rows; ++i){
        int truth = max_index(test.y.vals[i], test.y.cols);
        int p1 = max_index(g1.vals[i], g1.cols);
        int p2 = max_index(g2.vals[i], g2.cols);
        if(p1 == truth){
            if(p2 == truth) ++d;
            else ++c;
        }else{
            if(p2 == truth) ++b;
            else ++a;
        }
    }
    printf("%5d %5d\n%5d %5d\n", a, b, c, d);
    float num = pow((abs(b - c) - 1.), 2.);
    float den = b + c;
    printf("%f\n", num/den);
}

float network_accuracy(network net, data d)
{
    matrix guess = network_predict_data(net, d);
    float acc = matrix_topk_accuracy(d.y, guess, 1);
    free_matrix(guess);
    return acc;
}

float *network_accuracies(network net, data d, int n)
{
    static float acc[2];
    matrix guess = network_predict_data(net, d);
    acc[0] = matrix_topk_accuracy(d.y, guess, 1);
    acc[1] = matrix_topk_accuracy(d.y, guess, n);
    free_matrix(guess);
    return acc;
}

float network_accuracy_multi(network net, data d, int n)
{
    matrix guess = network_predict_data_multi(net, d, n);
    float acc = matrix_topk_accuracy(d.y, guess, 1);
    free_matrix(guess);
    return acc;
}

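// Free all layers and every network-owned buffer, including GPU and pinned
// host buffers when built with GPU support.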
void free_network(network net)
{
    int i;
    for (i = 0; i < net.n; ++i) {
        free_layer(net.layers[i]);
    }
    free(net.layers);

    free(net.seq_scales);
    free(net.scales);
    free(net.steps);
    free(net.seen);
    free(net.cur_iteration);

#ifdef GPU
    if (gpu_index >= 0) cuda_free(net.workspace);
    else free(net.workspace);
    free_pinned_memory();
    if (net.input_state_gpu) cuda_free(net.input_state_gpu);
    if (net.input_pinned_cpu) {   // CPU
        if (net.input_pinned_cpu_flag) cudaFreeHost(net.input_pinned_cpu);
        else free(net.input_pinned_cpu);
    }
    if (*net.input_gpu) cuda_free(*net.input_gpu);
    if (*net.truth_gpu) cuda_free(*net.truth_gpu);
    if (net.input_gpu) free(net.input_gpu);
    if (net.truth_gpu) free(net.truth_gpu);

    if (*net.input16_gpu) cuda_free(*net.input16_gpu);
    if (*net.output16_gpu) cuda_free(*net.output16_gpu);
    if (net.input16_gpu) free(net.input16_gpu);
    if (net.output16_gpu) free(net.output16_gpu);
    if (net.max_input16_size) free(net.max_input16_size);
    if (net.max_output16_size) free(net.max_output16_size);
#else
    free(net.workspace);
#endif
}

static float relu(float src) {
    if (src > 0) return src;
    return 0;
}

static float lrelu(float src) {
    const float eps = 0.001;
    if (src > eps) return src;
    return eps;
}

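// Fold batch-norm parameters into convolutional weights and biases, and bake
// shortcut weight normalization into the stored weights, so inference can
// skip these steps.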
void fuse_conv_batchnorm(network net)
{
    int j;
    for (j = 0; j < net.n; ++j) {
        layer *l = &net.layers[j];

        if (l->type == CONVOLUTIONAL) {
            //printf(" Merges Convolutional-%d and batch_norm \n", j);

            if (l->share_layer != NULL) {
                l->batch_normalize = 0;
            }

            if (l->batch_normalize) {
                int f;
                for (f = 0; f < l->n; ++f)
                {
                    l->biases[f] = l->biases[f] - (double)l->scales[f] * l->rolling_mean[f] / (sqrt((double)l->rolling_variance[f] + .00001));

                    const size_t filter_size = l->size*l->size*l->c / l->groups;
                    int i;
                    for (i = 0; i < filter_size; ++i) {
                        int w_index = f*filter_size + i;

                        l->weights[w_index] = (double)l->weights[w_index] * l->scales[f] / (sqrt((double)l->rolling_variance[f] + .00001));
                    }
                }

                free_convolutional_batchnorm(l);
                l->batch_normalize = 0;
#ifdef GPU
                if (gpu_index >= 0) {
                    push_convolutional_layer(*l);
                }
#endif
            }
        }
        else if (l->type == SHORTCUT && l->weights && l->weights_normalization)
        {
            if (l->nweights > 0) {
                //cuda_pull_array(l.weights_gpu, l.weights, l.nweights);
                int i;
                for (i = 0; i < l->nweights; ++i) printf(" w = %f,", l->weights[i]);
                printf(" l->nweights = %d, j = %d \n", l->nweights, j);
            }

            // nweights - l.n or l.n*l.c or (l.n*l.c*l.h*l.w)
            const int layer_step = l->nweights / (l->n + 1);    // 1 or l.c or (l.c * l.h * l.w)

            int chan, i;
            for (chan = 0; chan < layer_step; ++chan)
            {
                float sum = 1, max_val = -FLT_MAX;

                if (l->weights_normalization == SOFTMAX_NORMALIZATION) {
                    for (i = 0; i < (l->n + 1); ++i) {
                        int w_index = chan + i * layer_step;
                        float w = l->weights[w_index];
                        if (max_val < w) max_val = w;
                    }
                }

                const float eps = 0.0001;
                sum = eps;

                for (i = 0; i < (l->n + 1); ++i) {
                    int w_index = chan + i * layer_step;
                    float w = l->weights[w_index];
                    if (l->weights_normalization == RELU_NORMALIZATION) sum += lrelu(w);
                    else if (l->weights_normalization == SOFTMAX_NORMALIZATION) sum += expf(w - max_val);
                }

                for (i = 0; i < (l->n + 1); ++i) {
                    int w_index = chan + i * layer_step;
                    float w = l->weights[w_index];
                    if (l->weights_normalization == RELU_NORMALIZATION) w = lrelu(w) / sum;
                    else if (l->weights_normalization == SOFTMAX_NORMALIZATION) w = expf(w - max_val) / sum;
                    l->weights[w_index] = w;
                }
            }

            l->weights_normalization = NO_NORMALIZATION;

#ifdef GPU
            if (gpu_index >= 0) {
                push_shortcut_layer(*l);
            }
#endif
        }
        else {
            //printf(" Fusion skip layer type: %d \n", l->type);
        }
    }
}

void forward_blank_layer(layer l, network_state state) {}

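// Prepare XNOR convolutional layers: align and binarize their weights and,
// on GPU, fuse a following elementwise shortcut layer into the binary
// convolution by blanking the shortcut layer.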
void calculate_binary_weights(network net)
{
    int j;
    for (j = 0; j < net.n; ++j) {
        layer *l = &net.layers[j];

        if (l->type == CONVOLUTIONAL) {
            if (l->xnor) {
                //printf("\n %d \n", j);
                //l->lda_align = 256; // 256bit for AVX2    // set in make_convolutional_layer()
                //if (l->size*l->size*l->c >= 2048) l->lda_align = 512;

                binary_align_weights(l);

                if (net.layers[j].use_bin_output) {
                    l->activation = LINEAR;
                }

#ifdef GPU
                // fuse conv_xnor + shortcut -> conv_xnor
                if ((j + 1) < net.n && net.layers[j].type == CONVOLUTIONAL) {
                    layer *sc = &net.layers[j + 1];
                    if (sc->type == SHORTCUT && sc->w == sc->out_w && sc->h == sc->out_h && sc->c == sc->out_c)
                    {
                        l->bin_conv_shortcut_in_gpu = net.layers[net.layers[j + 1].index].output_gpu;
                        l->bin_conv_shortcut_out_gpu = net.layers[j + 1].output_gpu;

                        net.layers[j + 1].type = BLANK;
                        net.layers[j + 1].forward_gpu = forward_blank_layer;
                    }
                }
#endif  // GPU
            }
        }
    }
    //printf("\n calculate_binary_weights Done! \n");
}

void copy_cudnn_descriptors(layer src, layer *dst)
{
#ifdef CUDNN
    dst->normTensorDesc = src.normTensorDesc;
    dst->normDstTensorDesc = src.normDstTensorDesc;
    dst->normDstTensorDescF16 = src.normDstTensorDescF16;

    dst->srcTensorDesc = src.srcTensorDesc;
    dst->dstTensorDesc = src.dstTensorDesc;

    dst->srcTensorDesc16 = src.srcTensorDesc16;
    dst->dstTensorDesc16 = src.dstTensorDesc16;
#endif // CUDNN
}

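// Copy trained layers from net_train into the batch-1 net_map network while
// preserving net_map's own cuDNN descriptors.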
void copy_weights_net(network net_train, network *net_map)
{
    int k;
    for (k = 0; k < net_train.n; ++k) {
        layer *l = &(net_train.layers[k]);
        layer tmp_layer;
        copy_cudnn_descriptors(net_map->layers[k], &tmp_layer);
        net_map->layers[k] = net_train.layers[k];
        copy_cudnn_descriptors(tmp_layer, &net_map->layers[k]);

        if (l->type == CRNN) {
            layer tmp_input_layer, tmp_self_layer, tmp_output_layer;
            copy_cudnn_descriptors(*net_map->layers[k].input_layer, &tmp_input_layer);
            copy_cudnn_descriptors(*net_map->layers[k].self_layer, &tmp_self_layer);
            copy_cudnn_descriptors(*net_map->layers[k].output_layer, &tmp_output_layer);
            net_map->layers[k].input_layer = net_train.layers[k].input_layer;
            net_map->layers[k].self_layer = net_train.layers[k].self_layer;
            net_map->layers[k].output_layer = net_train.layers[k].output_layer;
            //net_map->layers[k].output_gpu = net_map->layers[k].output_layer->output_gpu;  // already copied out of if()

            copy_cudnn_descriptors(tmp_input_layer, net_map->layers[k].input_layer);
            copy_cudnn_descriptors(tmp_self_layer, net_map->layers[k].self_layer);
            copy_cudnn_descriptors(tmp_output_layer, net_map->layers[k].output_layer);
        }
        else if (l->input_layer) // for AntiAliasing
        {
            layer tmp_input_layer;
            copy_cudnn_descriptors(*net_map->layers[k].input_layer, &tmp_input_layer);
            net_map->layers[k].input_layer = net_train.layers[k].input_layer;
            copy_cudnn_descriptors(tmp_input_layer, net_map->layers[k].input_layer);
        }
        net_map->layers[k].batch = 1;
        net_map->layers[k].steps = 1;
    }
}


// combine Training and Validation networks
network combine_train_valid_networks(network net_train, network net_map)
{
    network net_combined = make_network(net_train.n);
    layer *old_layers = net_combined.layers;
    net_combined = net_train;
    net_combined.layers = old_layers;
    net_combined.batch = 1;

    int k;
    for (k = 0; k < net_train.n; ++k) {
        layer *l = &(net_train.layers[k]);
        net_combined.layers[k] = net_train.layers[k];
        net_combined.layers[k].batch = 1;

        if (l->type == CONVOLUTIONAL) {
#ifdef CUDNN
            net_combined.layers[k].normTensorDesc = net_map.layers[k].normTensorDesc;
            net_combined.layers[k].normDstTensorDesc = net_map.layers[k].normDstTensorDesc;
            net_combined.layers[k].normDstTensorDescF16 = net_map.layers[k].normDstTensorDescF16;

            net_combined.layers[k].srcTensorDesc = net_map.layers[k].srcTensorDesc;
            net_combined.layers[k].dstTensorDesc = net_map.layers[k].dstTensorDesc;

            net_combined.layers[k].srcTensorDesc16 = net_map.layers[k].srcTensorDesc16;
            net_combined.layers[k].dstTensorDesc16 = net_map.layers[k].dstTensorDesc16;
#endif // CUDNN
        }
    }
    return net_combined;
}

void free_network_recurrent_state(network net)
{
    int k;
    for (k = 0; k < net.n; ++k) {
        if (net.layers[k].type == CONV_LSTM) free_state_conv_lstm(net.layers[k]);
        if (net.layers[k].type == CRNN) free_state_crnn(net.layers[k]);
    }
}

void randomize_network_recurrent_state(network net)
{
    int k;
    for (k = 0; k < net.n; ++k) {
        if (net.layers[k].type == CONV_LSTM) randomize_state_conv_lstm(net.layers[k]);
        if (net.layers[k].type == CRNN) free_state_crnn(net.layers[k]);
    }
}


void remember_network_recurrent_state(network net)
{
    int k;
    for (k = 0; k < net.n; ++k) {
        if (net.layers[k].type == CONV_LSTM) remember_state_conv_lstm(net.layers[k]);
        //if (net.layers[k].type == CRNN) free_state_crnn(net.layers[k]);
    }
}

void restore_network_recurrent_state(network net)
{
    int k;
    for (k = 0; k < net.n; ++k) {
        if (net.layers[k].type == CONV_LSTM) restore_state_conv_lstm(net.layers[k]);
        if (net.layers[k].type == CRNN) free_state_crnn(net.layers[k]);
    }
}