1 /*
2 * This file is part of the GROMACS molecular simulation package.
3 *
4 * Copyright (c) 1991-2000, University of Groningen, The Netherlands.
5 * Copyright (c) 2001-2004, The GROMACS development team.
6 * Copyright (c) 2013,2014,2015,2016,2018 by the GROMACS development team.
7 * Copyright (c) 2019,2020, by the GROMACS development team, led by
8 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
9 * and including many others, as listed in the AUTHORS file in the
10 * top-level source directory and at http://www.gromacs.org.
11 *
12 * GROMACS is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU Lesser General Public License
14 * as published by the Free Software Foundation; either version 2.1
15 * of the License, or (at your option) any later version.
16 *
17 * GROMACS is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * Lesser General Public License for more details.
21 *
22 * You should have received a copy of the GNU Lesser General Public
23 * License along with GROMACS; if not, see
24 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
25 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
26 *
27 * If you want to redistribute modifications to GROMACS, please
28 * consider that scientific software is very special. Version
29 * control is crucial - bugs must be traceable. We will be happy to
30 * consider code for inclusion in the official distribution, but
31 * derived work must not be called official GROMACS. Details are found
32 * in the README & COPYING files - if they are missing, get the
33 * official version at http://www.gromacs.org.
34 *
35 * To help us fund GROMACS development, we humbly ask that you cite
36 * the research papers on the package. Check out http://www.gromacs.org.
37 */
38 /* This file is completely threadsafe - keep it that way! */
39 #include "gmxpre.h"
40
41 #include "calcvir.h"
42
43 #include "config.h" /* for GMX_MAX_OPENMP_THREADS */
44
45 #include <algorithm>
46
47 #include "gromacs/math/vec.h"
48 #include "gromacs/math/vectypes.h"
49 #include "gromacs/mdlib/gmx_omp_nthreads.h"
50 #include "gromacs/pbcutil/ishift.h"
51 #include "gromacs/pbcutil/pbc.h"
52 #include "gromacs/utility/gmxassert.h"
53
54 #define XXXX 0
55 #define XXYY 1
56 #define XXZZ 2
57 #define YYXX 3
58 #define YYYY 4
59 #define YYZZ 5
60 #define ZZXX 6
61 #define ZZYY 7
62 #define ZZZZ 8
63
upd_vir(rvec vir,real dvx,real dvy,real dvz)64 static void upd_vir(rvec vir, real dvx, real dvy, real dvz)
65 {
66 vir[XX] -= 0.5 * dvx;
67 vir[YY] -= 0.5 * dvy;
68 vir[ZZ] -= 0.5 * dvz;
69 }
70
calc_x_times_f(int nxf,const rvec x[],const rvec f[],gmx_bool bScrewPBC,const matrix box,matrix x_times_f)71 static void calc_x_times_f(int nxf, const rvec x[], const rvec f[], gmx_bool bScrewPBC, const matrix box, matrix x_times_f)
72 {
73 clear_mat(x_times_f);
74
75 for (int i = 0; i < nxf; i++)
76 {
77 for (int d = 0; d < DIM; d++)
78 {
79 for (int n = 0; n < DIM; n++)
80 {
81 x_times_f[d][n] += x[i][d] * f[i][n];
82 }
83 }
84
85 if (bScrewPBC)
86 {
87 int isx = IS2X(i);
88 /* We should correct all odd x-shifts, but the range of isx is -2 to 2 */
89 if (isx == 1 || isx == -1)
90 {
91 for (int d = 0; d < DIM; d++)
92 {
93 for (int n = 0; n < DIM; n++)
94 {
95 x_times_f[d][n] += box[d][d] * f[i][n];
96 }
97 }
98 }
99 }
100 }
101 }
102
calc_vir(int nxf,const rvec x[],const rvec f[],tensor vir,bool bScrewPBC,const matrix box)103 void calc_vir(int nxf, const rvec x[], const rvec f[], tensor vir, bool bScrewPBC, const matrix box)
104 {
105 matrix x_times_f;
106
107 int nthreads = gmx_omp_nthreads_get_simple_rvec_task(emntDefault, nxf * 9);
108
109 GMX_ASSERT(nthreads >= 1, "Avoids uninitialized x_times_f (warning)");
110
111 if (nthreads == 1)
112 {
113 calc_x_times_f(nxf, x, f, bScrewPBC, box, x_times_f);
114 }
115 else
116 {
117 /* Use a buffer on the stack for storing thread-local results.
118 * We use 2 extra elements (=18 reals) per thread to separate thread
119 * local data by at least a cache line. Element 0 is not used.
120 */
121 matrix xf_buf[GMX_OPENMP_MAX_THREADS * 3];
122
123 #pragma omp parallel for num_threads(nthreads) schedule(static)
124 for (int thread = 0; thread < nthreads; thread++)
125 {
126 int start = (nxf * thread) / nthreads;
127 int end = std::min(nxf * (thread + 1) / nthreads, nxf);
128
129 calc_x_times_f(end - start, x + start, f + start, bScrewPBC, box,
130 thread == 0 ? x_times_f : xf_buf[thread * 3]);
131 }
132
133 for (int thread = 1; thread < nthreads; thread++)
134 {
135 m_add(x_times_f, xf_buf[thread * 3], x_times_f);
136 }
137 }
138
139 for (int d = 0; d < DIM; d++)
140 {
141 upd_vir(vir[d], x_times_f[d][XX], x_times_f[d][YY], x_times_f[d][ZZ]);
142 }
143 }
144
f_calc_vir(int i0,int i1,const rvec x[],const rvec f[],tensor vir,const matrix box)145 void f_calc_vir(int i0, int i1, const rvec x[], const rvec f[], tensor vir, const matrix box)
146 {
147 calc_vir(i1 - i0, x + i0, f + i0, vir, FALSE, box);
148 }
149