1 /*
2   Copyright (C) 2006-2007 M.A.L. Marques
3                 2018-2019 Susi Lehtola
4                 2019 X. Andrade
5 
6   This Source Code Form is subject to the terms of the Mozilla Public
7   License, v. 2.0. If a copy of the MPL was not distributed with this
8   file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 */
10 
11 
12 #include "util.h"
13 
14 /* initializes the mixing */
15 void
xc_mix_init(xc_func_type * p,int n_funcs,const int * funcs_id,const double * mix_coef)16 xc_mix_init(xc_func_type *p, int n_funcs, const int *funcs_id, const double *mix_coef)
17 {
18   int ii;
19 
20   assert(p != NULL);
21   assert(p->func_aux == NULL && p->mix_coef == NULL);
22 
23   /* allocate structures needed for */
24   p->n_func_aux = n_funcs;
25   p->mix_coef   = (double *) libxc_malloc(n_funcs*sizeof(double));
26   p->func_aux   = (xc_func_type **) libxc_malloc(n_funcs*sizeof(xc_func_type *));
27 
28   for(ii=0; ii<n_funcs; ii++){
29     p->mix_coef[ii] = mix_coef[ii];
30     p->func_aux[ii] = (xc_func_type *) libxc_malloc(sizeof(xc_func_type));
31     xc_func_init (p->func_aux[ii], funcs_id[ii], p->nspin);
32   }
33 
34   /* initialize variables */
35   p->cam_omega = 0.0;
36   p->cam_alpha = 0.0;
37   p->cam_beta  = 0.0;
38   p->nlc_b     = 0.0;
39   p->nlc_C     = 0.0;
40 }
41 
#ifdef HAVE_CUDA
/* CUDA kernel: one thread per grid point, dst[ip] += coeff*src[ip]. */
__global__ static void add_to_mix_gpu(size_t np, double * dst, double coeff, double *src){
  size_t ip = blockIdx.x * blockDim.x + threadIdx.x;
  /* the launch grid may overshoot np, so guard against out-of-range threads */
  if(ip < np) dst[ip] += coeff*src[ip];
}
#endif
48 
/* Accumulates coeff*src into dst over np entries; dispatches to the
   GPU kernel when CUDA support is compiled in. */
static void add_to_mix(size_t np, double * dst, double coeff, double *src){
#ifndef HAVE_CUDA
  size_t jj;
  for(jj = 0; jj < np; jj++)
    dst[jj] += coeff*src[jj];
#else
  /* ceil(np / CUDA_BLOCK_SIZE) blocks so every point is covered */
  auto nblocks = (np + CUDA_BLOCK_SIZE - 1)/CUDA_BLOCK_SIZE;
  add_to_mix_gpu<<<nblocks, CUDA_BLOCK_SIZE>>>(np, dst, coeff, src);
#endif
}
59 
/* family classification helpers: each level also matches the levels above it
   (an mgga counts as a gga, a gga counts as an lda) */
#define is_mgga(id)   ((id) == XC_FAMILY_MGGA || (id) == XC_FAMILY_HYB_MGGA)
#define is_gga(id)    ((id) == XC_FAMILY_GGA  || (id) == XC_FAMILY_HYB_GGA || is_mgga(id))
#define is_lda(id)    ((id) == XC_FAMILY_LDA  || (id) == XC_FAMILY_HYB_LDA ||  is_gga(id))
/* do-while(0) keeps the macro a single statement (safe in unbraced if/else);
   the argument is parenthesized against surprising expansions */
#define safe_free(pt) do { if((pt) != NULL) libxc_free(pt); } while(0)
/* accumulate component ii's contribution: VAR += mix_coef[ii]*VAR_ ;
   relies on np, dim, func and ii being in scope at the expansion site */
#define sum_var(VAR) add_to_mix(np*dim->VAR, VAR, func->mix_coef[ii], VAR ## _)
65 
66 void
xc_mix_func(const xc_func_type * func,size_t np,const double * rho,const double * sigma,const double * lapl,const double * tau,double * zk,MGGA_OUT_PARAMS_NO_EXC (double *))67 xc_mix_func(const xc_func_type *func, size_t np,
68             const double *rho, const double *sigma, const double *lapl, const double *tau,
69             double *zk, MGGA_OUT_PARAMS_NO_EXC(double *))
70 {
71   const xc_func_type *aux;
72   double *zk_;
73   double *vrho_, *vsigma_, *vlapl_, *vtau_;
74   double *v2rho2_, *v2rhosigma_, *v2rholapl_, *v2rhotau_, *v2sigma2_,
75     *v2sigmalapl_, *v2sigmatau_, *v2lapl2_, *v2lapltau_,  *v2tau2_;
76   double *v3rho3_, *v3rho2sigma_, *v3rho2lapl_, *v3rho2tau_, *v3rhosigma2_,
77     *v3rhosigmalapl_, *v3rhosigmatau_, *v3rholapl2_, *v3rholapltau_,
78     *v3rhotau2_, *v3sigma3_, *v3sigma2lapl_, *v3sigma2tau_,
79     *v3sigmalapl2_, *v3sigmalapltau_, *v3sigmatau2_, *v3lapl3_,
80     *v3lapl2tau_, *v3lapltau2_, *v3tau3_;
81   double *v4rho4_, *v4rho3sigma_, *v4rho3lapl_, *v4rho3tau_, *v4rho2sigma2_,
82     *v4rho2sigmalapl_, *v4rho2sigmatau_, *v4rho2lapl2_, *v4rho2lapltau_,
83     *v4rho2tau2_, *v4rhosigma3_, *v4rhosigma2lapl_, *v4rhosigma2tau_,
84     *v4rhosigmalapl2_, *v4rhosigmalapltau_, *v4rhosigmatau2_,
85     *v4rholapl3_, *v4rholapl2tau_, *v4rholapltau2_, *v4rhotau3_,
86     *v4sigma4_, *v4sigma3lapl_, *v4sigma3tau_, *v4sigma2lapl2_,
87     *v4sigma2lapltau_, *v4sigma2tau2_, *v4sigmalapl3_, *v4sigmalapl2tau_,
88     *v4sigmalapltau2_, *v4sigmatau3_, *v4lapl4_, *v4lapl3tau_,
89     *v4lapl2tau2_, *v4lapltau3_, *v4tau4_;
90 
91   int ii;
92 
93   const xc_dimensions *dim = &(func->dim);
94 
95   /* Sanity check: have we claimed the highest possible derivatives?
96      First, check for the lowest common derivative (also need to make
97      sure the derivatives have been compiled in!)
98   */
99   int have_vxc = XC_FLAGS_I_HAVE_VXC;
100   int have_fxc = XC_FLAGS_I_HAVE_FXC;
101   int have_kxc = XC_FLAGS_I_HAVE_KXC;
102   int have_lxc = XC_FLAGS_I_HAVE_LXC;
103   for(ii=0; ii<func->n_func_aux; ii++){
104     aux = func->func_aux[ii];
105     if(! (aux->info->flags & XC_FLAGS_HAVE_VXC))
106       have_vxc = 0;
107     if(! (aux->info->flags & XC_FLAGS_HAVE_FXC))
108       have_fxc = 0;
109     if(! (aux->info->flags & XC_FLAGS_HAVE_KXC))
110       have_kxc = 0;
111     if(! (aux->info->flags & XC_FLAGS_HAVE_LXC))
112       have_lxc = 0;
113   }
114   /* Then, for the actual checks */
115   assert(have_lxc == (func->info->flags & XC_FLAGS_I_HAVE_LXC));
116   assert(have_kxc == (func->info->flags & XC_FLAGS_I_HAVE_KXC));
117   assert(have_fxc == (func->info->flags & XC_FLAGS_I_HAVE_FXC));
118   assert(have_vxc == (func->info->flags & XC_FLAGS_I_HAVE_VXC));
119 
120   /* Sanity check: if component needs the Laplacian, then the mix
121      must require it too */
122   int need_laplacian = 0;
123   for(ii=0; ii<func->n_func_aux; ii++){
124     aux = func->func_aux[ii];
125     if(aux->info->flags & XC_FLAGS_NEEDS_LAPLACIAN)
126       need_laplacian = XC_FLAGS_NEEDS_LAPLACIAN;
127   }
128   assert((func->info->flags & XC_FLAGS_NEEDS_LAPLACIAN) == need_laplacian);
129 
130   /* Check compatibility of the individual components */
131   for(ii=0; ii<func->n_func_aux; ii++){
132     aux = func->func_aux[ii];
133     /* Sanity check: if component is GGA or meta-GGA, mix functional
134        must also be GGA or meta-GGA */
135     if(is_gga(aux->info->family))
136       assert(is_gga(func->info->family));
137     if(is_mgga(aux->info->family) && !is_mgga(func->info->family))
138       assert(is_mgga(func->info->family));
139     /* Sanity checks: if mix functional has higher derivatives, these
140        must also exist in the individual components */
141     if(func->info->flags & XC_FLAGS_HAVE_VXC)
142       assert(aux->info->flags & XC_FLAGS_HAVE_VXC);
143     if(func->info->flags & XC_FLAGS_HAVE_FXC)
144       assert(aux->info->flags & XC_FLAGS_HAVE_FXC);
145     if(func->info->flags & XC_FLAGS_HAVE_KXC)
146       assert(aux->info->flags & XC_FLAGS_HAVE_KXC);
147     if(func->info->flags & XC_FLAGS_HAVE_LXC)
148       assert(aux->info->flags & XC_FLAGS_HAVE_LXC);
149   }
150 
151   /* prepare buffers that will hold the results from the individual functionals */
152   zk_ = NULL;
153 
154   vrho_ = vsigma_ = vlapl_ = vtau_ = NULL;
155 
156   v2rho2_ = v2rhosigma_ = v2rholapl_ = v2rhotau_ = v2sigma2_ =
157     v2sigmalapl_ = v2sigmatau_ = v2lapl2_ = v2lapltau_ =  v2tau2_ = NULL;
158 
159   v3rho3_ = v3rho2sigma_ = v3rho2lapl_ = v3rho2tau_ = v3rhosigma2_ =
160     v3rhosigmalapl_ = v3rhosigmatau_ = v3rholapl2_ = v3rholapltau_ =
161     v3rhotau2_ = v3sigma3_ = v3sigma2lapl_ = v3sigma2tau_ =
162     v3sigmalapl2_ = v3sigmalapltau_ = v3sigmatau2_ = v3lapl3_ =
163     v3lapl2tau_ = v3lapltau2_ = v3tau3_ = NULL;
164 
165   v4rho4_ = v4rho3sigma_ = v4rho3lapl_ = v4rho3tau_ = v4rho2sigma2_ =
166     v4rho2sigmalapl_ = v4rho2sigmatau_ = v4rho2lapl2_ = v4rho2lapltau_ =
167     v4rho2tau2_ = v4rhosigma3_ = v4rhosigma2lapl_ = v4rhosigma2tau_ =
168     v4rhosigmalapl2_ = v4rhosigmalapltau_ = v4rhosigmatau2_ =
169     v4rholapl3_ = v4rholapl2tau_ = v4rholapltau2_ = v4rhotau3_ =
170     v4sigma4_ = v4sigma3lapl_ = v4sigma3tau_ = v4sigma2lapl2_ =
171     v4sigma2lapltau_ = v4sigma2tau2_ = v4sigmalapl3_ = v4sigmalapl2tau_ =
172     v4sigmalapltau2_ = v4sigmatau3_ = v4lapl4_ = v4lapl3tau_ =
173     v4lapl2tau2_ = v4lapltau3_ = v4tau4_ = NULL;
174 
175   /* allocate buffers */
176   if(zk != NULL)
177     zk_ = (double *) libxc_malloc(sizeof(double)*np*dim->zk);
178 
179   if(vrho != NULL){
180     vrho_ = (double *) libxc_malloc(sizeof(double)*np*dim->vrho);
181     if(is_gga(func->info->family)){
182       vsigma_ = (double *) libxc_malloc(sizeof(double)*np*dim->vsigma);
183     }
184     if(is_mgga(func->info->family)){
185       if(func->info->flags & XC_FLAGS_NEEDS_LAPLACIAN){
186         vlapl_ = (double *) libxc_malloc(sizeof(double)*np*dim->vlapl);
187       }
188       vtau_  = (double *) libxc_malloc(sizeof(double)*np*dim->vtau);
189     }
190   }
191 
192   if(v2rho2 != NULL){
193     v2rho2_ = (double *) libxc_malloc(sizeof(double)*np*dim->v2rho2);
194     if(is_gga(func->info->family)){
195       v2rhosigma_  = (double *) libxc_malloc(sizeof(double)*np*dim->v2rhosigma);
196       v2sigma2_    = (double *) libxc_malloc(sizeof(double)*np*dim->v2sigma2);
197     }
198     if(is_mgga(func->info->family)){
199       v2rholapl_   = (double *) libxc_malloc(sizeof(double)*np*dim->v2rholapl);
200       v2rhotau_    = (double *) libxc_malloc(sizeof(double)*np*dim->v2rhotau);
201       v2sigmalapl_ = (double *) libxc_malloc(sizeof(double)*np*dim->v2sigmalapl);
202       v2sigmatau_  = (double *) libxc_malloc(sizeof(double)*np*dim->v2sigmatau);
203       v2lapl2_     = (double *) libxc_malloc(sizeof(double)*np*dim->v2lapl2);
204       v2lapltau_   = (double *) libxc_malloc(sizeof(double)*np*dim->v2lapltau);
205       v2tau2_      = (double *) libxc_malloc(sizeof(double)*np*dim->v2tau2);
206     }
207   }
208 
209   if(v3rho3 != NULL){
210     v3rho3_      = (double *) libxc_malloc(sizeof(double)*np*dim->v3rho3);
211     if(is_gga(func->info->family)){
212       v3rho2sigma_ = (double *) libxc_malloc(sizeof(double)*np*dim->v3rho2sigma);
213       v3rhosigma2_ = (double *) libxc_malloc(sizeof(double)*np*dim->v3rhosigma2);
214       v3sigma3_    = (double *) libxc_malloc(sizeof(double)*np*dim->v3sigma3);
215     }
216     if(is_mgga(func->info->family)){
217       v3rho2lapl_     = (double *) libxc_malloc(sizeof(double)*np*dim->v3rho2lapl);
218       v3rho2tau_      = (double *) libxc_malloc(sizeof(double)*np*dim->v3rho2tau);
219       v3rhosigmalapl_ = (double *) libxc_malloc(sizeof(double)*np*dim->v3rhosigmalapl);
220       v3rhosigmatau_  = (double *) libxc_malloc(sizeof(double)*np*dim->v3rhosigmatau);
221       v3rholapl2_     = (double *) libxc_malloc(sizeof(double)*np*dim->v3rholapl2);
222       v3rholapltau_   = (double *) libxc_malloc(sizeof(double)*np*dim->v3rholapltau);
223       v3rhotau2_      = (double *) libxc_malloc(sizeof(double)*np*dim->v3rhotau2);
224       v3sigma2lapl_   = (double *) libxc_malloc(sizeof(double)*np*dim->v3sigma2lapl);
225       v3sigma2tau_    = (double *) libxc_malloc(sizeof(double)*np*dim->v3sigma2tau);
226       v3sigmalapl2_   = (double *) libxc_malloc(sizeof(double)*np*dim->v3sigmalapl2);
227       v3sigmalapltau_ = (double *) libxc_malloc(sizeof(double)*np*dim->v3sigmalapltau);
228       v3sigmatau2_    = (double *) libxc_malloc(sizeof(double)*np*dim->v3sigmatau2);
229       v3lapl3_        = (double *) libxc_malloc(sizeof(double)*np*dim->v3lapl3);
230       v3lapl2tau_     = (double *) libxc_malloc(sizeof(double)*np*dim->v3lapl2tau);
231       v3lapltau2_     = (double *) libxc_malloc(sizeof(double)*np*dim->v3lapltau2);
232       v3tau3_         = (double *) libxc_malloc(sizeof(double)*np*dim->v3tau3);
233     }
234   }
235   if(v4rho4 != NULL){
236     v4rho4_            = (double *) libxc_malloc(sizeof(double)*np*dim->v4rho4);
237     if(is_gga(func->info->family)){
238       v4rho3sigma_       = (double *) libxc_malloc(sizeof(double)*np*dim->v4rho3sigma);
239       v4rho2sigma2_      = (double *) libxc_malloc(sizeof(double)*np*dim->v4rho2sigma2);
240       v4rhosigma3_       = (double *) libxc_malloc(sizeof(double)*np*dim->v4rhosigma3);
241       v4sigma4_          = (double *) libxc_malloc(sizeof(double)*np*dim->v4sigma4);
242     }
243     if(is_mgga(func->info->family)){
244       v4rho3lapl_        = (double *) libxc_malloc(sizeof(double)*np*dim->v4rho3lapl);
245       v4rho3tau_         = (double *) libxc_malloc(sizeof(double)*np*dim->v4rho3tau);
246       v4rho2sigmalapl_   = (double *) libxc_malloc(sizeof(double)*np*dim->v4rho2sigmalapl);
247       v4rho2sigmatau_    = (double *) libxc_malloc(sizeof(double)*np*dim->v4rho2sigmatau);
248       v4rho2lapl2_       = (double *) libxc_malloc(sizeof(double)*np*dim->v4rho2lapl2);
249       v4rho2lapltau_     = (double *) libxc_malloc(sizeof(double)*np*dim->v4rho2lapltau);
250       v4rho2tau2_        = (double *) libxc_malloc(sizeof(double)*np*dim->v4rho2tau2);
251       v4rhosigma2lapl_   = (double *) libxc_malloc(sizeof(double)*np*dim->v4rhosigma2lapl);
252       v4rhosigma2tau_    = (double *) libxc_malloc(sizeof(double)*np*dim->v4rhosigma2tau);
253       v4rhosigmalapl2_   = (double *) libxc_malloc(sizeof(double)*np*dim->v4rhosigmalapl2);
254       v4rhosigmalapltau_ = (double *) libxc_malloc(sizeof(double)*np*dim->v4rhosigmalapltau);
255       v4rhosigmatau2_    = (double *) libxc_malloc(sizeof(double)*np*dim->v4rhosigmatau2);
256       v4rholapl3_        = (double *) libxc_malloc(sizeof(double)*np*dim->v4rholapl3);
257       v4rholapl2tau_     = (double *) libxc_malloc(sizeof(double)*np*dim->v4rholapl2tau);
258       v4rholapltau2_     = (double *) libxc_malloc(sizeof(double)*np*dim->v4rholapltau2);
259       v4rhotau3_         = (double *) libxc_malloc(sizeof(double)*np*dim->v4rhotau3);
260       v4sigma3lapl_      = (double *) libxc_malloc(sizeof(double)*np*dim->v4sigma3lapl);
261       v4sigma3tau_       = (double *) libxc_malloc(sizeof(double)*np*dim->v4sigma3tau);
262       v4sigma2lapl2_     = (double *) libxc_malloc(sizeof(double)*np*dim->v4sigma2lapl2);
263       v4sigma2lapltau_   = (double *) libxc_malloc(sizeof(double)*np*dim->v4sigma2lapltau);
264       v4sigma2tau2_      = (double *) libxc_malloc(sizeof(double)*np*dim->v4sigma2tau2);
265       v4sigmalapl3_      = (double *) libxc_malloc(sizeof(double)*np*dim->v4sigmalapl3);
266       v4sigmalapl2tau_   = (double *) libxc_malloc(sizeof(double)*np*dim->v4sigmalapl2tau);
267       v4sigmalapltau2_   = (double *) libxc_malloc(sizeof(double)*np*dim->v4sigmalapltau2);
268       v4sigmatau3_       = (double *) libxc_malloc(sizeof(double)*np*dim->v4sigmatau3);
269       v4lapl4_           = (double *) libxc_malloc(sizeof(double)*np*dim->v4lapl4);
270       v4lapl3tau_        = (double *) libxc_malloc(sizeof(double)*np*dim->v4lapl3tau);
271       v4lapl2tau2_       = (double *) libxc_malloc(sizeof(double)*np*dim->v4lapl2tau2);
272       v4lapltau3_        = (double *) libxc_malloc(sizeof(double)*np*dim->v4lapltau3);
273       v4tau4_            = (double *) libxc_malloc(sizeof(double)*np*dim->v4tau4);
274     }
275   }
276 
277   /* Proceed by computing the mix */
278   for(ii=0; ii<func->n_func_aux; ii++){
279     aux = func->func_aux[ii];
280 
281     /* Evaluate the functional */
282     switch(aux->info->family){
283     case XC_FAMILY_LDA:
284       xc_lda(aux, np, rho, zk_, vrho_, v2rho2_, v3rho3_, v4rho4_);
285       break;
286     case XC_FAMILY_GGA:
287       xc_gga(aux, np, rho, sigma, zk_, vrho_, vsigma_,
288              v2rho2_, v2rhosigma_, v2sigma2_,
289              v3rho3_, v3rho2sigma_, v3rhosigma2_, v3sigma3_,
290              v4rho4_, v4rho3sigma_, v4rho2sigma2_, v4rhosigma3_, v4sigma4_);
291       break;
292     case XC_FAMILY_MGGA:
293       xc_mgga(aux, np, rho, sigma, lapl, tau,
294               zk_,
295               vrho_, vsigma_, vlapl_, vtau_,
296               v2rho2_, v2rhosigma_, v2rholapl_, v2rhotau_, v2sigma2_,
297               v2sigmalapl_, v2sigmatau_, v2lapl2_, v2lapltau_,  v2tau2_,
298               v3rho3_, v3rho2sigma_, v3rho2lapl_, v3rho2tau_, v3rhosigma2_,
299               v3rhosigmalapl_, v3rhosigmatau_, v3rholapl2_, v3rholapltau_,
300               v3rhotau2_, v3sigma3_, v3sigma2lapl_, v3sigma2tau_,
301               v3sigmalapl2_, v3sigmalapltau_, v3sigmatau2_, v3lapl3_,
302               v3lapl2tau_, v3lapltau2_, v3tau3_,
303               v4rho4_, v4rho3sigma_, v4rho3lapl_, v4rho3tau_, v4rho2sigma2_,
304               v4rho2sigmalapl_, v4rho2sigmatau_, v4rho2lapl2_, v4rho2lapltau_,
305               v4rho2tau2_, v4rhosigma3_, v4rhosigma2lapl_, v4rhosigma2tau_,
306               v4rhosigmalapl2_, v4rhosigmalapltau_, v4rhosigmatau2_,
307               v4rholapl3_, v4rholapl2tau_, v4rholapltau2_, v4rhotau3_,
308               v4sigma4_, v4sigma3lapl_, v4sigma3tau_, v4sigma2lapl2_,
309               v4sigma2lapltau_, v4sigma2tau2_, v4sigmalapl3_, v4sigmalapl2tau_,
310               v4sigmalapltau2_, v4sigmatau3_, v4lapl4_, v4lapl3tau_,
311               v4lapl2tau2_, v4lapltau3_, v4tau4_);
312       break;
313     }
314 
315     /* Do the mixing */
316     if(zk != NULL) {
317       sum_var(zk);
318     }
319 
320  #ifndef XC_DONT_COMPILE_VXC
321     if(vrho != NULL) {
322       sum_var(vrho);
323 
324       if(is_gga(aux->info->family)) {
325         sum_var(vsigma);
326       }
327 
328       if(is_mgga(aux->info->family)) {
329         if(aux->info->flags & XC_FLAGS_NEEDS_LAPLACIAN) {
330           sum_var(vlapl);
331         }
332         sum_var(vtau);
333       }
334     }
335 
336 #ifndef XC_DONT_COMPILE_FXC
337     if(v2rho2 != NULL){
338       sum_var(v2rho2);
339 
340       if(is_gga(aux->info->family)) {
341         sum_var(v2rhosigma);
342         sum_var(v2sigma2);
343       }
344 
345       if(is_mgga(aux->info->family)) {
346         if(aux->info->flags & XC_FLAGS_NEEDS_LAPLACIAN) {
347           sum_var(v2rholapl);
348           sum_var(v2sigmalapl);
349           sum_var(v2lapl2);
350           sum_var(v2lapltau);
351         }
352         sum_var(v2rhotau);
353         sum_var(v2sigmatau);
354         sum_var(v2tau2);
355       }
356     }
357 
358 #ifndef XC_DONT_COMPILE_KXC
359     if(v3rho3 != NULL){
360       sum_var(v3rho3);
361 
362       if(is_gga(aux->info->family)) {
363         sum_var(v3rho2sigma);
364         sum_var(v3rhosigma2);
365         sum_var(v3sigma3);
366       }
367 
368       if(is_mgga(aux->info->family)) {
369         if(aux->info->flags & XC_FLAGS_NEEDS_LAPLACIAN) {
370           sum_var(v3rho2lapl);
371           sum_var(v3rhosigmalapl);
372           sum_var(v3rholapl2);
373           sum_var(v3rholapltau);
374           sum_var(v3sigma2lapl);
375           sum_var(v3sigmalapl2);
376           sum_var(v3sigmalapltau);
377           sum_var(v3lapl3);
378           sum_var(v3lapl2tau);
379           sum_var(v3lapltau2);
380         }
381         sum_var(v3rho2tau);
382         sum_var(v3rhosigmatau);
383         sum_var(v3rhotau2);
384         sum_var(v3sigma2tau);
385         sum_var(v3sigmatau2);
386         sum_var(v3tau3);
387       }
388     }
389 
390 #ifndef XC_DONT_COMPILE_LXC
391     if(v4rho4 != NULL){
392       sum_var(v4rho4);
393 
394       if(is_gga(aux->info->family)) {
395         sum_var(v4rho3sigma);
396         sum_var(v4rho2sigma2);
397         sum_var(v4rhosigma3);
398         sum_var(v4sigma4);
399       }
400       if(is_mgga(aux->info->family)) {
401         if(aux->info->flags & XC_FLAGS_NEEDS_LAPLACIAN) {
402           sum_var(v4rho3lapl);
403           sum_var(v4rho2sigmalapl);
404           sum_var(v4rho2lapl2);
405           sum_var(v4rho2lapltau);
406           sum_var(v4rhosigma2lapl);
407           sum_var(v4rhosigmalapl2);
408           sum_var(v4rhosigmalapltau);
409           sum_var(v4rholapl3);
410           sum_var(v4rholapl2tau);
411           sum_var(v4rholapltau2);
412           sum_var(v4sigma3lapl);
413           sum_var(v4sigma2lapl2);
414           sum_var(v4sigma2lapltau);
415           sum_var(v4sigmalapl3);
416           sum_var(v4sigmalapl2tau);
417           sum_var(v4sigmalapltau2);
418           sum_var(v4lapl4);
419           sum_var(v4lapl3tau);
420           sum_var(v4lapl2tau2);
421           sum_var(v4lapltau3);
422         }
423         sum_var(v4rho3tau);
424         sum_var(v4rho2sigmatau);
425         sum_var(v4rho2tau2);
426         sum_var(v4rhosigma2tau);
427         sum_var(v4rhosigmatau2);
428         sum_var(v4rhotau3);
429         sum_var(v4sigma3tau);
430         sum_var(v4sigma2tau2);
431         sum_var(v4sigmatau3);
432         sum_var(v4tau4);
433       }
434     }
435 #endif
436 #endif
437 #endif
438 #endif
439   } /* end functional loop */
440 
441   /* deallocate internal buffers */
442   safe_free(zk_);
443 #ifndef XC_DONT_COMPILE_VXC
444   safe_free(vrho_); safe_free(vsigma_); safe_free(vlapl_); safe_free(vtau_);
445 #ifndef XC_DONT_COMPILE_FXC
446   safe_free(v2rho2_); safe_free(v2rhosigma_); safe_free(v2rholapl_); safe_free(v2rhotau_);
447   safe_free(v2sigma2_); safe_free(v2sigmalapl_); safe_free(v2sigmatau_);
448   safe_free(v2lapl2_); safe_free(v2lapltau_); safe_free(v2tau2_);
449 #ifndef XC_DONT_COMPILE_KXC
450   safe_free(v3rho3_); safe_free(v3rho2sigma_); safe_free(v3rho2lapl_); safe_free(v3rho2tau_);
451   safe_free(v3rhosigma2_); safe_free(v3rhosigmalapl_); safe_free(v3rhosigmatau_);
452   safe_free(v3rholapl2_); safe_free(v3rholapltau_); safe_free(v3rhotau2_);
453   safe_free(v3sigma3_); safe_free(v3sigma2lapl_); safe_free(v3sigma2tau_);
454   safe_free(v3sigmalapl2_); safe_free(v3sigmalapltau_); safe_free(v3sigmatau2_);
455   safe_free(v3lapl3_); safe_free(v3lapl2tau_); safe_free(v3lapltau2_); safe_free(v3tau3_);
456 #ifndef XC_DONT_COMPILE_LXC
457   safe_free(v4rho4_); safe_free(v4rho3sigma_); safe_free(v4rho3lapl_); safe_free(v4rho3tau_);
458   safe_free(v4rho2sigma2_); safe_free(v4rho2sigmalapl_); safe_free(v4rho2sigmatau_);
459   safe_free(v4rho2lapl2_); safe_free(v4rho2lapltau_); safe_free(v4rho2tau2_);
460   safe_free(v4rhosigma3_); safe_free(v4rhosigma2lapl_); safe_free(v4rhosigma2tau_);
461   safe_free(v4rhosigmalapl2_); safe_free(v4rhosigmalapltau_); safe_free(v4rhosigmatau2_);
462   safe_free(v4rholapl3_); safe_free(v4rholapl2tau_); safe_free(v4rholapltau2_); safe_free(v4rhotau3_);
463   safe_free(v4sigma4_); safe_free(v4sigma3lapl_); safe_free(v4sigma3tau_); safe_free(v4sigma2lapl2_);
464   safe_free(v4sigma2lapltau_); safe_free(v4sigma2tau2_); safe_free(v4sigmalapl3_); safe_free(v4sigmalapl2tau_);
465   safe_free(v4sigmalapltau2_); safe_free(v4sigmatau3_); safe_free(v4lapl4_); safe_free(v4lapl3tau_);
466   safe_free(v4lapl2tau2_); safe_free(v4lapltau3_); safe_free(v4tau4_);
467 #endif
468 #endif
469 #endif
470 #endif
471 }
472 
473