#include "layer.h"
#include "dark_cuda.h"
#include <stdlib.h>
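
/* Free a heap-allocated sublayer: release its internal buffers via
 * free_layer(), then the struct itself. Safe to call with NULL. */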
void free_sublayer(layer *l)
{
    if (l) {
        free_layer(*l);
        free(l);
    }
}
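
/* Free a layer and all of its resources, including any cuDNN descriptors. */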
void free_layer(layer l)
{
    free_layer_custom(l, 0);
}
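
/* Free a layer's buffers. The layer is passed by value, so the NULL
 * assignments below only affect the local copy; callers must not reuse the
 * original struct afterwards. When keep_cudnn_desc is non-zero, cuDNN
 * descriptors are left intact (e.g. while computing mAP, when the network
 * keeps running). */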
void free_layer_custom(layer l, int keep_cudnn_desc)
{
    if (l.share_layer != NULL) return;    // don't free shared layers
    if (l.antialiasing) {
        free_sublayer(l.input_layer);
    }
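    // CONV_LSTM gates are built from convolutional sublayers; each owns its own buffers.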
    if (l.type == CONV_LSTM) {
        if (l.peephole) {
            free_sublayer(l.vf);
            free_sublayer(l.vi);
            free_sublayer(l.vo);
        }
        else {
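            // without peephole connections, vf/vi/vo were never filled in
            // with convolutional buffers, so only the structs are freed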
            free(l.vf);
            free(l.vi);
            free(l.vo);
        }
        free_sublayer(l.wf);
        free_sublayer(l.wi);
        free_sublayer(l.wg);
        free_sublayer(l.wo);
        free_sublayer(l.uf);
        free_sublayer(l.ui);
        free_sublayer(l.ug);
        free_sublayer(l.uo);
    }
    if (l.type == CRNN) {
        free_sublayer(l.input_layer);
        free_sublayer(l.self_layer);
        free_sublayer(l.output_layer);
        l.output = NULL;
        l.delta = NULL;
#ifdef GPU
        l.output_gpu = NULL;
        l.delta_gpu = NULL;
#endif // GPU
    }
    if (l.type == DROPOUT) {
        if (l.rand)                  free(l.rand);
#ifdef GPU
        if (l.rand_gpu)              cuda_free(l.rand_gpu);
        if (l.drop_blocks_scale)     cuda_free_host(l.drop_blocks_scale);
        if (l.drop_blocks_scale_gpu) cuda_free(l.drop_blocks_scale_gpu);
#endif // GPU
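        // dropout layers share output/delta with the previous layer,
        // so stop here to avoid freeing buffers this layer does not own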
        return;
    }
    if (l.mask)                free(l.mask);
    if (l.classes_multipliers) free(l.classes_multipliers);
    if (l.cweights)            free(l.cweights);
    if (l.indexes)             free(l.indexes);
    if (l.input_layers)        free(l.input_layers);
    if (l.input_sizes)         free(l.input_sizes);
    if (l.layers_output)       free(l.layers_output);
    if (l.layers_delta)        free(l.layers_delta);
    if (l.map)                 free(l.map);
    if (l.rand)                free(l.rand);
    if (l.cost)                free(l.cost);
    if (l.state)               free(l.state);
    if (l.prev_state)          free(l.prev_state);
    if (l.forgot_state)        free(l.forgot_state);
    if (l.forgot_delta)        free(l.forgot_delta);
    if (l.state_delta)         free(l.state_delta);
    if (l.concat)              free(l.concat);
    if (l.concat_delta)        free(l.concat_delta);
    if (l.binary_weights)      free(l.binary_weights);
    if (l.biases)              free(l.biases), l.biases = NULL;
    if (l.bias_updates)        free(l.bias_updates), l.bias_updates = NULL;
    if (l.scales)              free(l.scales), l.scales = NULL;
    if (l.scale_updates)       free(l.scale_updates), l.scale_updates = NULL;
    if (l.weights)             free(l.weights), l.weights = NULL;
    if (l.weight_updates)      free(l.weight_updates), l.weight_updates = NULL;
    if (l.align_bit_weights)   free(l.align_bit_weights);
    if (l.mean_arr)            free(l.mean_arr);
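    // page-locked (pinned) host buffers must be released with cudaFreeHost;
    // NULL the pointers so the regular free() calls below skip them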
#ifdef GPU
    if (l.delta && l.delta_pinned) {
        cudaFreeHost(l.delta);
        l.delta = NULL;
    }
    if (l.output && l.output_pinned) {
        cudaFreeHost(l.output);
        l.output = NULL;
    }
#endif  // GPU
    if (l.delta)               free(l.delta), l.delta = NULL;
    if (l.output)              free(l.output), l.output = NULL;
    if (l.activation_input)    free(l.activation_input), l.activation_input = NULL;
    if (l.squared)             free(l.squared);
    if (l.norms)               free(l.norms);
    if (l.spatial_mean)        free(l.spatial_mean);
    if (l.mean)                free(l.mean), l.mean = NULL;
    if (l.variance)            free(l.variance), l.variance = NULL;
    if (l.mean_delta)          free(l.mean_delta), l.mean_delta = NULL;
    if (l.variance_delta)      free(l.variance_delta), l.variance_delta = NULL;
    if (l.rolling_mean)        free(l.rolling_mean), l.rolling_mean = NULL;
    if (l.rolling_variance)    free(l.rolling_variance), l.rolling_variance = NULL;
    if (l.x)                   free(l.x);
    if (l.x_norm)              free(l.x_norm);
    if (l.m)                   free(l.m);
    if (l.v)                   free(l.v);
    if (l.z_cpu)               free(l.z_cpu);
    if (l.r_cpu)               free(l.r_cpu);
    if (l.binary_input)        free(l.binary_input);
    if (l.bin_re_packed_input) free(l.bin_re_packed_input);
    if (l.t_bit_input)         free(l.t_bit_input);
    if (l.loss)                free(l.loss);

    // CONV-LSTM (CPU-side gate and state buffers)
    if (l.f_cpu)               free(l.f_cpu);
    if (l.i_cpu)               free(l.i_cpu);
    if (l.g_cpu)               free(l.g_cpu);
    if (l.o_cpu)               free(l.o_cpu);
    if (l.c_cpu)               free(l.c_cpu);
    if (l.h_cpu)               free(l.h_cpu);
    if (l.temp_cpu)            free(l.temp_cpu);
    if (l.temp2_cpu)           free(l.temp2_cpu);
    if (l.temp3_cpu)           free(l.temp3_cpu);
    if (l.dc_cpu)              free(l.dc_cpu);
    if (l.dh_cpu)              free(l.dh_cpu);
    if (l.prev_state_cpu)      free(l.prev_state_cpu);
    if (l.prev_cell_cpu)       free(l.prev_cell_cpu);
    if (l.stored_c_cpu)        free(l.stored_c_cpu);
    if (l.stored_h_cpu)        free(l.stored_h_cpu);
    if (l.cell_cpu)            free(l.cell_cpu);

#ifdef GPU
    if (l.indexes_gpu)             cuda_free((float *)l.indexes_gpu);

    if (l.z_gpu)                   cuda_free(l.z_gpu);
    if (l.r_gpu)                   cuda_free(l.r_gpu);
    if (l.m_gpu)                   cuda_free(l.m_gpu);
    if (l.v_gpu)                   cuda_free(l.v_gpu);
    if (l.forgot_state_gpu)        cuda_free(l.forgot_state_gpu);
    if (l.forgot_delta_gpu)        cuda_free(l.forgot_delta_gpu);
    if (l.state_gpu)               cuda_free(l.state_gpu);
    if (l.state_delta_gpu)         cuda_free(l.state_delta_gpu);
    if (l.gate_gpu)                cuda_free(l.gate_gpu);
    if (l.gate_delta_gpu)          cuda_free(l.gate_delta_gpu);
    if (l.save_gpu)                cuda_free(l.save_gpu);
    if (l.save_delta_gpu)          cuda_free(l.save_delta_gpu);
    if (l.concat_gpu)              cuda_free(l.concat_gpu);
    if (l.concat_delta_gpu)        cuda_free(l.concat_delta_gpu);
    if (l.binary_input_gpu)        cuda_free(l.binary_input_gpu);
    if (l.binary_weights_gpu)      cuda_free(l.binary_weights_gpu);
    if (l.mean_gpu)                cuda_free(l.mean_gpu), l.mean_gpu = NULL;
    if (l.variance_gpu)            cuda_free(l.variance_gpu), l.variance_gpu = NULL;
    if (l.m_cbn_avg_gpu)           cuda_free(l.m_cbn_avg_gpu), l.m_cbn_avg_gpu = NULL;
    if (l.v_cbn_avg_gpu)           cuda_free(l.v_cbn_avg_gpu), l.v_cbn_avg_gpu = NULL;
    if (l.rolling_mean_gpu)        cuda_free(l.rolling_mean_gpu), l.rolling_mean_gpu = NULL;
    if (l.rolling_variance_gpu)    cuda_free(l.rolling_variance_gpu), l.rolling_variance_gpu = NULL;
    if (l.variance_delta_gpu)      cuda_free(l.variance_delta_gpu), l.variance_delta_gpu = NULL;
    if (l.mean_delta_gpu)          cuda_free(l.mean_delta_gpu), l.mean_delta_gpu = NULL;
    if (l.x_norm_gpu)              cuda_free(l.x_norm_gpu);

    // assisted excitation
    if (l.gt_gpu)                  cuda_free(l.gt_gpu);
    if (l.a_avg_gpu)               cuda_free(l.a_avg_gpu);

    if (l.align_bit_weights_gpu)   cuda_free((float *)l.align_bit_weights_gpu);
    if (l.mean_arr_gpu)            cuda_free(l.mean_arr_gpu);
    if (l.align_workspace_gpu)     cuda_free(l.align_workspace_gpu);
    if (l.transposed_align_workspace_gpu) cuda_free(l.transposed_align_workspace_gpu);

    if (l.weights_gpu)             cuda_free(l.weights_gpu), l.weights_gpu = NULL;
    if (l.weight_updates_gpu)      cuda_free(l.weight_updates_gpu), l.weight_updates_gpu = NULL;
    if (l.weight_deform_gpu)       cuda_free(l.weight_deform_gpu), l.weight_deform_gpu = NULL;
    if (l.weights_gpu16)           cuda_free(l.weights_gpu16), l.weights_gpu16 = NULL;
    if (l.weight_updates_gpu16)    cuda_free(l.weight_updates_gpu16), l.weight_updates_gpu16 = NULL;
    if (l.biases_gpu)              cuda_free(l.biases_gpu), l.biases_gpu = NULL;
    if (l.bias_updates_gpu)        cuda_free(l.bias_updates_gpu), l.bias_updates_gpu = NULL;
    if (l.scales_gpu)              cuda_free(l.scales_gpu), l.scales_gpu = NULL;
    if (l.scale_updates_gpu)       cuda_free(l.scale_updates_gpu), l.scale_updates_gpu = NULL;
    if (l.input_antialiasing_gpu)  cuda_free(l.input_antialiasing_gpu), l.input_antialiasing_gpu = NULL;
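    // at higher memory-optimization levels these activation buffers are
    // managed outside the layer, so they are only freed when optimized_memory < 2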
    if (l.optimized_memory < 2) {
        if (l.x_gpu)                   cuda_free(l.x_gpu), l.x_gpu = NULL;
        if (l.output_gpu)              cuda_free(l.output_gpu), l.output_gpu = NULL;
        if (l.output_avg_gpu)          cuda_free(l.output_avg_gpu), l.output_avg_gpu = NULL;
        if (l.activation_input_gpu)    cuda_free(l.activation_input_gpu), l.activation_input_gpu = NULL;
    }
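    // delta_gpu may be shared at the highest memory-optimization level, so it
    // is only freed when keep_delta_gpu is set and optimized_memory < 3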
    if (l.delta_gpu && l.keep_delta_gpu && l.optimized_memory < 3) cuda_free(l.delta_gpu), l.delta_gpu = NULL;
    if (l.rand_gpu)                cuda_free(l.rand_gpu);
    if (l.squared_gpu)             cuda_free(l.squared_gpu);
    if (l.norms_gpu)               cuda_free(l.norms_gpu);
    if (l.input_sizes_gpu)         cuda_free((float*)l.input_sizes_gpu);
    if (l.layers_output_gpu)       cuda_free((float*)l.layers_output_gpu);
    if (l.layers_delta_gpu)        cuda_free((float*)l.layers_delta_gpu);

    // CONV-LSTM (GPU-side gate and state buffers)
    if (l.f_gpu)                   cuda_free(l.f_gpu);
    if (l.i_gpu)                   cuda_free(l.i_gpu);
    if (l.g_gpu)                   cuda_free(l.g_gpu);
    if (l.o_gpu)                   cuda_free(l.o_gpu);
    if (l.c_gpu)                   cuda_free(l.c_gpu);
    if (l.h_gpu)                   cuda_free(l.h_gpu);
    if (l.temp_gpu)                cuda_free(l.temp_gpu);
    if (l.temp2_gpu)               cuda_free(l.temp2_gpu);
    if (l.temp3_gpu)               cuda_free(l.temp3_gpu);
    if (l.dc_gpu)                  cuda_free(l.dc_gpu);
    if (l.dh_gpu)                  cuda_free(l.dh_gpu);
    if (l.prev_state_gpu)          cuda_free(l.prev_state_gpu);
    if (l.prev_cell_gpu)           cuda_free(l.prev_cell_gpu);
    if (l.stored_c_gpu)            cuda_free(l.stored_c_gpu);
    if (l.stored_h_gpu)            cuda_free(l.stored_h_gpu);
    if (l.last_prev_state_gpu)     cuda_free(l.last_prev_state_gpu);
    if (l.last_prev_cell_gpu)      cuda_free(l.last_prev_cell_gpu);
    if (l.cell_gpu)                cuda_free(l.cell_gpu);
#ifdef CUDNN   // descriptors are kept alive when keep_cudnn_desc is set (e.g. during mAP calculation)
    if (!keep_cudnn_desc) {
        if (l.srcTensorDesc)        CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.srcTensorDesc));
        if (l.dstTensorDesc)        CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.dstTensorDesc));
        if (l.srcTensorDesc16)      CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.srcTensorDesc16));
        if (l.dstTensorDesc16)      CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.dstTensorDesc16));
        if (l.dsrcTensorDesc)       CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.dsrcTensorDesc));
        if (l.ddstTensorDesc)       CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.ddstTensorDesc));
        if (l.dsrcTensorDesc16)     CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.dsrcTensorDesc16));
        if (l.ddstTensorDesc16)     CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.ddstTensorDesc16));
        if (l.normTensorDesc)       CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.normTensorDesc));
        if (l.normDstTensorDesc)    CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.normDstTensorDesc));
        if (l.normDstTensorDescF16) CHECK_CUDNN(cudnnDestroyTensorDescriptor(l.normDstTensorDescF16));

        if (l.weightDesc)           CHECK_CUDNN(cudnnDestroyFilterDescriptor(l.weightDesc));
        if (l.weightDesc16)         CHECK_CUDNN(cudnnDestroyFilterDescriptor(l.weightDesc16));
        if (l.dweightDesc)          CHECK_CUDNN(cudnnDestroyFilterDescriptor(l.dweightDesc));
        if (l.dweightDesc16)        CHECK_CUDNN(cudnnDestroyFilterDescriptor(l.dweightDesc16));

        if (l.convDesc)             CHECK_CUDNN(cudnnDestroyConvolutionDescriptor(l.convDesc));

        if (l.poolingDesc)          CHECK_CUDNN(cudnnDestroyPoolingDescriptor(l.poolingDesc));

        // note: the fw_algo/bd_algo/bf_algo fields are plain enum values
        // (cudnnConvolutionFwdAlgo_t etc.) and need no explicit destruction
    }
#endif  // CUDNN

#endif  // GPU
}
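
/*
 * Usage sketch (hypothetical caller, not part of this file): layers are
 * freed by value, and a heap-allocated layer array is released separately.
 *
 *     // hypothetical helper, assuming `layers` was allocated with calloc
 *     void free_layers(layer *layers, int n)
 *     {
 *         for (int i = 0; i < n; ++i) {
 *             free_layer(layers[i]);   // frees CPU/GPU buffers and cuDNN descriptors
 *         }
 *         free(layers);
 *     }
 */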