1 /*
2 * This file is part of the GROMACS molecular simulation package.
3 *
4 * Copyright (c) 2016,2017,2018,2019,2020 by the GROMACS development team.
5 * Copyright (c) 2021, by the GROMACS development team, led by
6 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
7 * and including many others, as listed in the AUTHORS file in the
8 * top-level source directory and at http://www.gromacs.org.
9 *
10 * GROMACS is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU Lesser General Public License
12 * as published by the Free Software Foundation; either version 2.1
13 * of the License, or (at your option) any later version.
14 *
15 * GROMACS is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * Lesser General Public License for more details.
19 *
20 * You should have received a copy of the GNU Lesser General Public
21 * License along with GROMACS; if not, see
22 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
23 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 *
25 * If you want to redistribute modifications to GROMACS, please
26 * consider that scientific software is very special. Version
27 * control is crucial - bugs must be traceable. We will be happy to
28 * consider code for inclusion in the official distribution, but
29 * derived work must not be called official GROMACS. Details are found
30 * in the README & COPYING files - if they are missing, get the
31 * official version at http://www.gromacs.org.
32 *
33 * To help us fund GROMACS development, we humbly ask that you cite
34 * the research papers on the package. Check out http://www.gromacs.org.
35 */
36
37 /*! \internal \file
38 *
39 * \brief This file contains internal function implementations
40 * for performing the PME calculations on GPU.
41 *
42 * Note that this file is compiled as regular C++ source in OpenCL builds, but
43 * it is treated as CUDA source in CUDA-enabled GPU builds.
44 *
45 * \author Aleksei Iupinov <a.yupinov@gmail.com>
46 * \ingroup module_ewald
47 */
48
49 #include "gmxpre.h"
50
51 #include "pme_gpu_internal.h"
52
53 #include "config.h"
54
55 #include <list>
56 #include <memory>
57 #include <string>
58
59 #include "gromacs/ewald/ewald_utils.h"
60 #include "gromacs/gpu_utils/device_context.h"
61 #include "gromacs/gpu_utils/device_stream.h"
62 #include "gromacs/gpu_utils/gpu_utils.h"
63 #include "gromacs/hardware/device_information.h"
64 #include "gromacs/math/invertmatrix.h"
65 #include "gromacs/math/units.h"
66 #include "gromacs/timing/gpu_timing.h"
67 #include "gromacs/utility/exceptions.h"
68 #include "gromacs/utility/fatalerror.h"
69 #include "gromacs/utility/gmxassert.h"
70 #include "gromacs/utility/logger.h"
71 #include "gromacs/utility/stringutil.h"
72
73 #if GMX_GPU_CUDA
74 # include "gromacs/gpu_utils/pmalloc_cuda.h"
75
76 # include "pme.cuh"
77 #elif GMX_GPU_OPENCL
78 # include "gromacs/gpu_utils/gmxopencl.h"
79 #endif
80
81 #include "gromacs/ewald/pme.h"
82
83 #include "pme_gpu_3dfft.h"
84 #include "pme_gpu_calculate_splines.h"
85 #include "pme_gpu_constants.h"
86 #include "pme_gpu_program_impl.h"
87 #include "pme_gpu_timings.h"
88 #include "pme_gpu_types.h"
89 #include "pme_gpu_types_host.h"
90 #include "pme_gpu_types_host_impl.h"
91 #include "pme_grid.h"
92 #include "pme_internal.h"
93 #include "pme_solve.h"
94
95 /*! \brief
96 * CUDA only.
97 * Atom limit above which it is advantageous to turn on spline recalculation in the gather kernel
98 * and to use fewer threads per atom in the spline and spread kernels.
99 */
100 constexpr int c_pmeGpuPerformanceAtomLimit = 23000;
101
102 /*! \internal \brief
103 * Wrapper for getting a pointer to the plain C++ part of the GPU kernel parameters structure.
104 *
105 * \param[in] pmeGpu The PME GPU structure.
106 * \returns The pointer to the kernel parameters.
107 */
108 static PmeGpuKernelParamsBase* pme_gpu_get_kernel_params_base_ptr(const PmeGpu* pmeGpu)
109 {
110 // reinterpret_cast is needed because the derived CUDA structure is not known in this file
111 auto* kernelParamsPtr = reinterpret_cast<PmeGpuKernelParamsBase*>(pmeGpu->kernelParams.get());
112 return kernelParamsPtr;
113 }
114
115 /*! \brief
116 * Atom data block size (in terms of number of atoms).
117 * This is the least common multiple of the numbers of atoms processed by
118 * a single block/workgroup of the spread and gather kernels.
119 * The GPU atom data buffers must be padded, which means that
120 * the numbers of atoms used for determining the size of the memory
121 * allocation must be divisible by this.
122 */
123 constexpr int c_pmeAtomDataBlockSize = 64;
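// Usage sketch (values are illustrative only): callers round their atom count up to the next
// multiple of this block size before sizing GPU buffers, mirroring pme_gpu_reinit_atoms() below:
//     const int blockSize    = pme_gpu_get_atom_data_block_size();                    // 64
//     const int nAtomsPadded = ((nAtoms + blockSize - 1) / blockSize) * blockSize;
//     // e.g. nAtoms = 10000  ->  nAtomsPadded = 10048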
124
125 int pme_gpu_get_atom_data_block_size()
126 {
127 return c_pmeAtomDataBlockSize;
128 }
129
130 void pme_gpu_synchronize(const PmeGpu* pmeGpu)
131 {
132 pmeGpu->archSpecific->pmeStream_.synchronize();
133 }
134
135 void pme_gpu_alloc_energy_virial(PmeGpu* pmeGpu)
136 {
137 const size_t energyAndVirialSize = c_virialAndEnergyCount * sizeof(float);
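// Layout note: each per-grid buffer holds c_virialAndEnergyCount floats, i.e. the six independent
// virial components followed by the energy; see pme_gpu_getEnergyAndVirial() below for the unpacking order.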
138
139 GMX_ASSERT(
140 pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
141 "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
142
143 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
144 {
145 allocateDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex],
146 c_virialAndEnergyCount, pmeGpu->archSpecific->deviceContext_);
147 pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_virialAndEnergy[gridIndex]), energyAndVirialSize);
148 }
149 }
150
151 void pme_gpu_free_energy_virial(PmeGpu* pmeGpu)
152 {
153 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
154 {
155 freeDeviceBuffer(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex]);
156 pfree(pmeGpu->staging.h_virialAndEnergy[gridIndex]);
157 pmeGpu->staging.h_virialAndEnergy[gridIndex] = nullptr;
158 }
159 }
160
161 void pme_gpu_clear_energy_virial(const PmeGpu* pmeGpu)
162 {
163 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
164 {
165 clearDeviceBufferAsync(&pmeGpu->kernelParams->constants.d_virialAndEnergy[gridIndex], 0,
166 c_virialAndEnergyCount, pmeGpu->archSpecific->pmeStream_);
167 }
168 }
169
170 void pme_gpu_realloc_and_copy_bspline_values(PmeGpu* pmeGpu, const int gridIndex)
171 {
172 GMX_ASSERT(
173 pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
174 "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
175 GMX_ASSERT(gridIndex < pmeGpu->common->ngrids,
176 "Invalid combination of gridIndex and number of grids");
177
178 const int splineValuesOffset[DIM] = { 0, pmeGpu->kernelParams->grid.realGridSize[XX],
179 pmeGpu->kernelParams->grid.realGridSize[XX]
180 + pmeGpu->kernelParams->grid.realGridSize[YY] };
181 memcpy(&pmeGpu->kernelParams->grid.splineValuesOffset, &splineValuesOffset, sizeof(splineValuesOffset));
182
183 const int newSplineValuesSize = pmeGpu->kernelParams->grid.realGridSize[XX]
184 + pmeGpu->kernelParams->grid.realGridSize[YY]
185 + pmeGpu->kernelParams->grid.realGridSize[ZZ];
186 const bool shouldRealloc = (newSplineValuesSize > pmeGpu->archSpecific->splineValuesSize[gridIndex]);
187 reallocateDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex],
188 newSplineValuesSize, &pmeGpu->archSpecific->splineValuesSize[gridIndex],
189 &pmeGpu->archSpecific->splineValuesCapacity[gridIndex],
190 pmeGpu->archSpecific->deviceContext_);
191 if (shouldRealloc)
192 {
193 /* Reallocate the host buffer */
194 pfree(pmeGpu->staging.h_splineModuli[gridIndex]);
195 pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_splineModuli[gridIndex]),
196 newSplineValuesSize * sizeof(float));
197 }
198 for (int i = 0; i < DIM; i++)
199 {
200 memcpy(pmeGpu->staging.h_splineModuli[gridIndex] + splineValuesOffset[i],
201 pmeGpu->common->bsp_mod[i].data(), pmeGpu->common->bsp_mod[i].size() * sizeof(float));
202 }
203 /* TODO: pin original buffer instead! */
204 copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex],
205 pmeGpu->staging.h_splineModuli[gridIndex], 0, newSplineValuesSize,
206 pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
207 }
208
209 void pme_gpu_free_bspline_values(const PmeGpu* pmeGpu)
210 {
211 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
212 {
213 pfree(pmeGpu->staging.h_splineModuli[gridIndex]);
214 freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_splineModuli[gridIndex]);
215 }
216 }
217
218 void pme_gpu_realloc_forces(PmeGpu* pmeGpu)
219 {
220 const size_t newForcesSize = pmeGpu->nAtomsAlloc * DIM;
221 GMX_ASSERT(newForcesSize > 0, "Bad number of atoms in PME GPU");
222 reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, newForcesSize,
223 &pmeGpu->archSpecific->forcesSize, &pmeGpu->archSpecific->forcesSizeAlloc,
224 pmeGpu->archSpecific->deviceContext_);
225 pmeGpu->staging.h_forces.reserveWithPadding(pmeGpu->nAtomsAlloc);
226 pmeGpu->staging.h_forces.resizeWithPadding(pmeGpu->kernelParams->atoms.nAtoms);
227 }
228
229 void pme_gpu_free_forces(const PmeGpu* pmeGpu)
230 {
231 freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces);
232 }
233
234 void pme_gpu_copy_input_forces(PmeGpu* pmeGpu)
235 {
236 GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
237 float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
238 copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_forces, h_forcesFloat, 0,
239 DIM * pmeGpu->kernelParams->atoms.nAtoms, pmeGpu->archSpecific->pmeStream_,
240 pmeGpu->settings.transferKind, nullptr);
241 }
242
243 void pme_gpu_copy_output_forces(PmeGpu* pmeGpu)
244 {
245 GMX_ASSERT(pmeGpu->kernelParams->atoms.nAtoms > 0, "Bad number of atoms in PME GPU");
246 float* h_forcesFloat = reinterpret_cast<float*>(pmeGpu->staging.h_forces.data());
247 copyFromDeviceBuffer(h_forcesFloat, &pmeGpu->kernelParams->atoms.d_forces, 0,
248 DIM * pmeGpu->kernelParams->atoms.nAtoms, pmeGpu->archSpecific->pmeStream_,
249 pmeGpu->settings.transferKind, nullptr);
250 }
251
252 void pme_gpu_realloc_and_copy_input_coefficients(const PmeGpu* pmeGpu,
253 const float* h_coefficients,
254 const int gridIndex)
255 {
256 GMX_ASSERT(h_coefficients, "Bad host-side charge buffer in PME GPU");
257 const size_t newCoefficientsSize = pmeGpu->nAtomsAlloc;
258 GMX_ASSERT(newCoefficientsSize > 0, "Bad number of atoms in PME GPU");
259 reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex],
260 newCoefficientsSize, &pmeGpu->archSpecific->coefficientsSize[gridIndex],
261 &pmeGpu->archSpecific->coefficientsCapacity[gridIndex],
262 pmeGpu->archSpecific->deviceContext_);
263 copyToDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex],
264 const_cast<float*>(h_coefficients), 0, pmeGpu->kernelParams->atoms.nAtoms,
265 pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
266
267 const size_t paddingIndex = pmeGpu->kernelParams->atoms.nAtoms;
268 const size_t paddingCount = pmeGpu->nAtomsAlloc - paddingIndex;
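// The charges of the padding atoms are zeroed below, so the kernels can process whole padded
// blocks of atoms without affecting the physical result.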
269 if (paddingCount > 0)
270 {
271 clearDeviceBufferAsync(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex], paddingIndex,
272 paddingCount, pmeGpu->archSpecific->pmeStream_);
273 }
274 }
275
276 void pme_gpu_free_coefficients(const PmeGpu* pmeGpu)
277 {
278 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
279 {
280 freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_coefficients[gridIndex]);
281 }
282 }
283
284 void pme_gpu_realloc_spline_data(PmeGpu* pmeGpu)
285 {
286 const int order = pmeGpu->common->pme_order;
287 const int newSplineDataSize = DIM * order * pmeGpu->nAtomsAlloc;
288 GMX_ASSERT(newSplineDataSize > 0, "Bad number of atoms in PME GPU");
289 /* Two arrays of the same size */
290 const bool shouldRealloc = (newSplineDataSize > pmeGpu->archSpecific->splineDataSize);
291 int currentSizeTemp = pmeGpu->archSpecific->splineDataSize;
292 int currentSizeTempAlloc = pmeGpu->archSpecific->splineDataSizeAlloc;
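// d_theta and d_dtheta share the same size/capacity bookkeeping; the temporaries above let the
// first reallocateDeviceBuffer() call run without consuming the stored counters, which are then
// updated by the second call.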
293 reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta, newSplineDataSize, &currentSizeTemp,
294 &currentSizeTempAlloc, pmeGpu->archSpecific->deviceContext_);
295 reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta, newSplineDataSize,
296 &pmeGpu->archSpecific->splineDataSize, &pmeGpu->archSpecific->splineDataSizeAlloc,
297 pmeGpu->archSpecific->deviceContext_);
298 // the host side reallocation
299 if (shouldRealloc)
300 {
301 pfree(pmeGpu->staging.h_theta);
302 pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_theta), newSplineDataSize * sizeof(float));
303 pfree(pmeGpu->staging.h_dtheta);
304 pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_dtheta), newSplineDataSize * sizeof(float));
305 }
306 }
307
308 void pme_gpu_free_spline_data(const PmeGpu* pmeGpu)
309 {
310 /* Two arrays of the same size */
311 freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_theta);
312 freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_dtheta);
313 pfree(pmeGpu->staging.h_theta);
314 pfree(pmeGpu->staging.h_dtheta);
315 }
316
317 void pme_gpu_realloc_grid_indices(PmeGpu* pmeGpu)
318 {
319 const size_t newIndicesSize = DIM * pmeGpu->nAtomsAlloc;
320 GMX_ASSERT(newIndicesSize > 0, "Bad number of atoms in PME GPU");
321 reallocateDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices, newIndicesSize,
322 &pmeGpu->archSpecific->gridlineIndicesSize,
323 &pmeGpu->archSpecific->gridlineIndicesSizeAlloc,
324 pmeGpu->archSpecific->deviceContext_);
325 pfree(pmeGpu->staging.h_gridlineIndices);
326 pmalloc(reinterpret_cast<void**>(&pmeGpu->staging.h_gridlineIndices), newIndicesSize * sizeof(int));
327 }
328
329 void pme_gpu_free_grid_indices(const PmeGpu* pmeGpu)
330 {
331 freeDeviceBuffer(&pmeGpu->kernelParams->atoms.d_gridlineIndices);
332 pfree(pmeGpu->staging.h_gridlineIndices);
333 }
334
335 void pme_gpu_realloc_grids(PmeGpu* pmeGpu)
336 {
337 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
338
339 const int newRealGridSize = kernelParamsPtr->grid.realGridSizePadded[XX]
340 * kernelParamsPtr->grid.realGridSizePadded[YY]
341 * kernelParamsPtr->grid.realGridSizePadded[ZZ];
342 const int newComplexGridSize = kernelParamsPtr->grid.complexGridSizePadded[XX]
343 * kernelParamsPtr->grid.complexGridSizePadded[YY]
344 * kernelParamsPtr->grid.complexGridSizePadded[ZZ] * 2;
345 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
346 {
347 // Multiplied by 2 because we count complex grid size for complex numbers, but all allocations/pointers are float
348 if (pmeGpu->archSpecific->performOutOfPlaceFFT)
349 {
350 /* 2 separate grids */
351 reallocateDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid[gridIndex], newComplexGridSize,
352 &pmeGpu->archSpecific->complexGridSize[gridIndex],
353 &pmeGpu->archSpecific->complexGridCapacity[gridIndex],
354 pmeGpu->archSpecific->deviceContext_);
355 reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid[gridIndex], newRealGridSize,
356 &pmeGpu->archSpecific->realGridSize[gridIndex],
357 &pmeGpu->archSpecific->realGridCapacity[gridIndex],
358 pmeGpu->archSpecific->deviceContext_);
359 }
360 else
361 {
362 /* A single buffer so that any grid will fit */
363 const int newGridsSize = std::max(newRealGridSize, newComplexGridSize);
364 reallocateDeviceBuffer(&kernelParamsPtr->grid.d_realGrid[gridIndex], newGridsSize,
365 &pmeGpu->archSpecific->realGridSize[gridIndex],
366 &pmeGpu->archSpecific->realGridCapacity[gridIndex],
367 pmeGpu->archSpecific->deviceContext_);
368 kernelParamsPtr->grid.d_fourierGrid[gridIndex] = kernelParamsPtr->grid.d_realGrid[gridIndex];
369 pmeGpu->archSpecific->complexGridSize[gridIndex] =
370 pmeGpu->archSpecific->realGridSize[gridIndex];
371 // the size might get used later for copying the grid
372 }
373 }
374 }
375
376 void pme_gpu_free_grids(const PmeGpu* pmeGpu)
377 {
378 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
379 {
380 if (pmeGpu->archSpecific->performOutOfPlaceFFT)
381 {
382 freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_fourierGrid[gridIndex]);
383 }
384 freeDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex]);
385 }
386 }
387
388 void pme_gpu_clear_grids(const PmeGpu* pmeGpu)
389 {
390 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
391 {
392 clearDeviceBufferAsync(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex], 0,
393 pmeGpu->archSpecific->realGridSize[gridIndex],
394 pmeGpu->archSpecific->pmeStream_);
395 }
396 }
397
398 void pme_gpu_realloc_and_copy_fract_shifts(PmeGpu* pmeGpu)
399 {
400 pme_gpu_free_fract_shifts(pmeGpu);
401
402 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
403
404 const int nx = kernelParamsPtr->grid.realGridSize[XX];
405 const int ny = kernelParamsPtr->grid.realGridSize[YY];
406 const int nz = kernelParamsPtr->grid.realGridSize[ZZ];
407 const int cellCount = c_pmeNeighborUnitcellCount;
408 const int gridDataOffset[DIM] = { 0, cellCount * nx, cellCount * (nx + ny) };
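// The per-dimension fractional-shift and gridline-index tables are packed back to back into a
// single buffer; tablesOffsets records where the Y and Z sections start (X starts at 0).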
409
410 memcpy(kernelParamsPtr->grid.tablesOffsets, &gridDataOffset, sizeof(gridDataOffset));
411
412 const int newFractShiftsSize = cellCount * (nx + ny + nz);
413
414 initParamLookupTable(&kernelParamsPtr->grid.d_fractShiftsTable,
415 &kernelParamsPtr->fractShiftsTableTexture, pmeGpu->common->fsh.data(),
416 newFractShiftsSize, pmeGpu->archSpecific->deviceContext_);
417
418 initParamLookupTable(&kernelParamsPtr->grid.d_gridlineIndicesTable,
419 &kernelParamsPtr->gridlineIndicesTableTexture, pmeGpu->common->nn.data(),
420 newFractShiftsSize, pmeGpu->archSpecific->deviceContext_);
421 }
422
423 void pme_gpu_free_fract_shifts(const PmeGpu* pmeGpu)
424 {
425 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
426 #if GMX_GPU_CUDA
427 destroyParamLookupTable(&kernelParamsPtr->grid.d_fractShiftsTable,
428 kernelParamsPtr->fractShiftsTableTexture);
429 destroyParamLookupTable(&kernelParamsPtr->grid.d_gridlineIndicesTable,
430 kernelParamsPtr->gridlineIndicesTableTexture);
431 #elif GMX_GPU_OPENCL
432 freeDeviceBuffer(&kernelParamsPtr->grid.d_fractShiftsTable);
433 freeDeviceBuffer(&kernelParamsPtr->grid.d_gridlineIndicesTable);
434 #endif
435 }
436
437 bool pme_gpu_stream_query(const PmeGpu* pmeGpu)
438 {
439 return haveStreamTasksCompleted(pmeGpu->archSpecific->pmeStream_);
440 }
441
442 void pme_gpu_copy_input_gather_grid(const PmeGpu* pmeGpu, const float* h_grid, const int gridIndex)
443 {
444 copyToDeviceBuffer(&pmeGpu->kernelParams->grid.d_realGrid[gridIndex], h_grid, 0,
445 pmeGpu->archSpecific->realGridSize[gridIndex],
446 pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
447 }
448
449 void pme_gpu_copy_output_spread_grid(const PmeGpu* pmeGpu, float* h_grid, const int gridIndex)
450 {
451 copyFromDeviceBuffer(h_grid, &pmeGpu->kernelParams->grid.d_realGrid[gridIndex], 0,
452 pmeGpu->archSpecific->realGridSize[gridIndex],
453 pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
454 pmeGpu->archSpecific->syncSpreadGridD2H.markEvent(pmeGpu->archSpecific->pmeStream_);
455 }
456
457 void pme_gpu_copy_output_spread_atom_data(const PmeGpu* pmeGpu)
458 {
459 const size_t splinesCount = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
460 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
461 copyFromDeviceBuffer(pmeGpu->staging.h_dtheta, &kernelParamsPtr->atoms.d_dtheta, 0, splinesCount,
462 pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
463 copyFromDeviceBuffer(pmeGpu->staging.h_theta, &kernelParamsPtr->atoms.d_theta, 0, splinesCount,
464 pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
465 copyFromDeviceBuffer(pmeGpu->staging.h_gridlineIndices, &kernelParamsPtr->atoms.d_gridlineIndices,
466 0, kernelParamsPtr->atoms.nAtoms * DIM, pmeGpu->archSpecific->pmeStream_,
467 pmeGpu->settings.transferKind, nullptr);
468 }
469
470 void pme_gpu_copy_input_gather_atom_data(const PmeGpu* pmeGpu)
471 {
472 const size_t splinesCount = DIM * pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order;
473 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
474
475 // TODO: could clear only the padding and not the whole thing, but this is a test-exclusive code anyway
476 clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_gridlineIndices, 0, pmeGpu->nAtomsAlloc * DIM,
477 pmeGpu->archSpecific->pmeStream_);
478 clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_dtheta, 0,
479 pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
480 pmeGpu->archSpecific->pmeStream_);
481 clearDeviceBufferAsync(&kernelParamsPtr->atoms.d_theta, 0,
482 pmeGpu->nAtomsAlloc * pmeGpu->common->pme_order * DIM,
483 pmeGpu->archSpecific->pmeStream_);
484
485 copyToDeviceBuffer(&kernelParamsPtr->atoms.d_dtheta, pmeGpu->staging.h_dtheta, 0, splinesCount,
486 pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
487 copyToDeviceBuffer(&kernelParamsPtr->atoms.d_theta, pmeGpu->staging.h_theta, 0, splinesCount,
488 pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
489 copyToDeviceBuffer(&kernelParamsPtr->atoms.d_gridlineIndices, pmeGpu->staging.h_gridlineIndices,
490 0, kernelParamsPtr->atoms.nAtoms * DIM, pmeGpu->archSpecific->pmeStream_,
491 pmeGpu->settings.transferKind, nullptr);
492 }
493
494 void pme_gpu_sync_spread_grid(const PmeGpu* pmeGpu)
495 {
496 pmeGpu->archSpecific->syncSpreadGridD2H.waitForEvent();
497 }
498
499 /*! \brief Internal GPU initialization for PME.
500 *
501 * \param[in] pmeGpu GPU PME data.
502 * \param[in] deviceContext GPU context.
503 * \param[in] deviceStream GPU stream.
504 */
505 static void pme_gpu_init_internal(PmeGpu* pmeGpu, const DeviceContext& deviceContext, const DeviceStream& deviceStream)
506 {
507 /* Allocate the target-specific structures */
508 pmeGpu->archSpecific.reset(new PmeGpuSpecific(deviceContext, deviceStream));
509 pmeGpu->kernelParams.reset(new PmeGpuKernelParams());
510
511 pmeGpu->archSpecific->performOutOfPlaceFFT = true;
512 /* This should give better performance, according to the cuFFT documentation.
513 * The performance seems to be the same though.
514 * TODO: PME could also try to pick up nice grid sizes (with factors of 2, 3, 5, 7).
515 */
516
517 #if GMX_GPU_CUDA
518 pmeGpu->maxGridWidthX = deviceContext.deviceInfo().prop.maxGridSize[0];
519 #else
520 // Use this path for any non-CUDA GPU acceleration
521 // TODO: is there really no global work size limit in OpenCL?
522 pmeGpu->maxGridWidthX = INT32_MAX / 2;
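// Assumption in this fallback: INT32_MAX / 2 is treated as effectively unlimited; if a launch
// ever exceeded it, pmeGpuCreateGrid() below would split the block count over a second grid dimension.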
523 #endif
524 }
525
526 void pme_gpu_reinit_3dfft(const PmeGpu* pmeGpu)
527 {
528 if (pme_gpu_settings(pmeGpu).performGPUFFT)
529 {
530 pmeGpu->archSpecific->fftSetup.resize(0);
531 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
532 {
533 pmeGpu->archSpecific->fftSetup.push_back(std::make_unique<GpuParallel3dFft>(pmeGpu, gridIndex));
534 }
535 }
536 }
537
538 void pme_gpu_destroy_3dfft(const PmeGpu* pmeGpu)
539 {
540 pmeGpu->archSpecific->fftSetup.resize(0);
541 }
542
543 void pme_gpu_getEnergyAndVirial(const gmx_pme_t& pme, const float lambda, PmeOutput* output)
544 {
545 const PmeGpu* pmeGpu = pme.gpu;
546
547 GMX_ASSERT(lambda == 1.0 || pmeGpu->common->ngrids == 2,
548 "Invalid combination of lambda and number of grids");
549
550 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
551 {
552 for (int j = 0; j < c_virialAndEnergyCount; j++)
553 {
554 GMX_ASSERT(std::isfinite(pmeGpu->staging.h_virialAndEnergy[gridIndex][j]),
555 "PME GPU produces incorrect energy/virial.");
556 }
557 }
558 for (int dim1 = 0; dim1 < DIM; dim1++)
559 {
560 for (int dim2 = 0; dim2 < DIM; dim2++)
561 {
562 output->coulombVirial_[dim1][dim2] = 0;
563 }
564 }
565 output->coulombEnergy_ = 0;
566 float scale = 1.0;
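// Each grid contributes seven accumulated floats: indices 0-5 are the virial components
// (xx, yy, zz, xy, xz, yz) and index 6 is the energy. With two FEP grids the contributions
// are mixed as (1 - lambda) * gridA + lambda * gridB via the scale factor below.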
567 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
568 {
569 if (pmeGpu->common->ngrids == 2)
570 {
571 scale = gridIndex == 0 ? (1.0 - lambda) : lambda;
572 }
573 output->coulombVirial_[XX][XX] +=
574 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][0];
575 output->coulombVirial_[YY][YY] +=
576 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][1];
577 output->coulombVirial_[ZZ][ZZ] +=
578 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][2];
579 output->coulombVirial_[XX][YY] +=
580 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][3];
581 output->coulombVirial_[YY][XX] +=
582 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][3];
583 output->coulombVirial_[XX][ZZ] +=
584 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][4];
585 output->coulombVirial_[ZZ][XX] +=
586 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][4];
587 output->coulombVirial_[YY][ZZ] +=
588 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][5];
589 output->coulombVirial_[ZZ][YY] +=
590 scale * 0.25F * pmeGpu->staging.h_virialAndEnergy[gridIndex][5];
591 output->coulombEnergy_ += scale * 0.5F * pmeGpu->staging.h_virialAndEnergy[gridIndex][6];
592 }
593 if (pmeGpu->common->ngrids > 1)
594 {
595 output->coulombDvdl_ = 0.5F
596 * (pmeGpu->staging.h_virialAndEnergy[FEP_STATE_B][6]
597 - pmeGpu->staging.h_virialAndEnergy[FEP_STATE_A][6]);
598 }
599 }
600
601 /*! \brief Sets the force-related members in \p output
602 *
603 * \param[in] pmeGpu PME GPU data structure
604 * \param[out] output Pointer to PME output data structure
605 */
606 static void pme_gpu_getForceOutput(PmeGpu* pmeGpu, PmeOutput* output)
607 {
608 output->haveForceOutput_ = !pmeGpu->settings.useGpuForceReduction;
609 if (output->haveForceOutput_)
610 {
611 output->forces_ = pmeGpu->staging.h_forces;
612 }
613 }
614
615 PmeOutput pme_gpu_getOutput(const gmx_pme_t& pme, const bool computeEnergyAndVirial, const real lambdaQ)
616 {
617 PmeGpu* pmeGpu = pme.gpu;
618
619 PmeOutput output;
620
621 pme_gpu_getForceOutput(pmeGpu, &output);
622
623 if (computeEnergyAndVirial)
624 {
625 if (pme_gpu_settings(pmeGpu).performGPUSolve)
626 {
627 pme_gpu_getEnergyAndVirial(pme, lambdaQ, &output);
628 }
629 else
630 {
631 get_pme_ener_vir_q(pme.solve_work, pme.nthread, &output);
632 }
633 }
634 return output;
635 }
636
637 void pme_gpu_update_input_box(PmeGpu gmx_unused* pmeGpu, const matrix gmx_unused box)
638 {
639 #if GMX_DOUBLE
640 GMX_THROW(gmx::NotImplementedError("PME is implemented for single-precision only on GPU"));
641 #else
642 matrix scaledBox;
643 pmeGpu->common->boxScaler->scaleBox(box, scaledBox);
644 auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
645 kernelParamsPtr->current.boxVolume = scaledBox[XX][XX] * scaledBox[YY][YY] * scaledBox[ZZ][ZZ];
646 GMX_ASSERT(kernelParamsPtr->current.boxVolume != 0.0F, "Zero volume of the unit cell");
647 matrix recipBox;
648 gmx::invertBoxMatrix(scaledBox, recipBox);
649
650 /* The GPU recipBox is transposed as compared to the CPU recipBox.
651 * Spread uses matrix columns (while solve and gather use rows).
652 * There is no particular reason for this; it might be further rethought/optimized for better access patterns.
653 */
654 const real newRecipBox[DIM][DIM] = { { recipBox[XX][XX], recipBox[YY][XX], recipBox[ZZ][XX] },
655 { 0.0, recipBox[YY][YY], recipBox[ZZ][YY] },
656 { 0.0, 0.0, recipBox[ZZ][ZZ] } };
657 memcpy(kernelParamsPtr->current.recipBox, newRecipBox, sizeof(matrix));
658 #endif
659 }
660
661 /*! \brief \libinternal
662 * (Re-)initializes all the PME GPU data related to the grid size and cut-off.
663 *
664 * \param[in] pmeGpu The PME GPU structure.
665 */
666 static void pme_gpu_reinit_grids(PmeGpu* pmeGpu)
667 {
668 auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
669
670 GMX_ASSERT(
671 pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
672 "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
673
674 kernelParamsPtr->grid.ewaldFactor =
675 (M_PI * M_PI) / (pmeGpu->common->ewaldcoeff_q * pmeGpu->common->ewaldcoeff_q);
676 /* The grid size variants */
677 for (int i = 0; i < DIM; i++)
678 {
679 kernelParamsPtr->grid.realGridSize[i] = pmeGpu->common->nk[i];
680 kernelParamsPtr->grid.realGridSizeFP[i] =
681 static_cast<float>(kernelParamsPtr->grid.realGridSize[i]);
682 kernelParamsPtr->grid.realGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
683
684 // The complex grid currently uses no padding;
685 // if it starts to do so, then another test should be added for that
686 kernelParamsPtr->grid.complexGridSize[i] = kernelParamsPtr->grid.realGridSize[i];
687 kernelParamsPtr->grid.complexGridSizePadded[i] = kernelParamsPtr->grid.realGridSize[i];
688 }
689 /* FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
690 if (!pme_gpu_settings(pmeGpu).performGPUFFT)
691 {
692 // This allows for GPU spreading grid and CPU fftgrid to have the same layout, so that we can copy the data directly
693 kernelParamsPtr->grid.realGridSizePadded[ZZ] =
694 (kernelParamsPtr->grid.realGridSize[ZZ] / 2 + 1) * 2;
695 }
696 /* GPU FFT: n real elements correspond to (n / 2 + 1) complex elements in minor dimension */
697 kernelParamsPtr->grid.complexGridSize[ZZ] /= 2;
698 kernelParamsPtr->grid.complexGridSize[ZZ]++;
699 kernelParamsPtr->grid.complexGridSizePadded[ZZ] = kernelParamsPtr->grid.complexGridSize[ZZ];
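// Worked example: a real-space dimension of 48 points transforms to 48 / 2 + 1 = 25 complex
// values in the minor (ZZ) dimension, stored as 50 floats since all device buffers are float.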
700
701 pme_gpu_realloc_and_copy_fract_shifts(pmeGpu);
702 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
703 {
704 pme_gpu_realloc_and_copy_bspline_values(pmeGpu, gridIndex);
705 }
706
707 pme_gpu_realloc_grids(pmeGpu);
708 pme_gpu_reinit_3dfft(pmeGpu);
709 }
710
711 /* Several GPU functions that refer to the CPU PME data live here.
712 * We would like to keep these away from the GPU-framework specific code for clarity,
713 * as well as compilation issues with MPI.
714 */
715
716 /*! \brief \libinternal
717 * Copies everything useful from the PME CPU to the PME GPU structure.
718 * The goal is to minimize interaction with the PME CPU structure in the GPU code.
719 *
720 * \param[in] pme The PME structure.
721 */
722 static void pme_gpu_copy_common_data_from(const gmx_pme_t* pme)
723 {
724 /* TODO: Consider refactoring the CPU PME code to use the same structure,
725 * so that this function becomes 2 lines */
726 PmeGpu* pmeGpu = pme->gpu;
727 pmeGpu->common->ngrids = pme->bFEP_q ? 2 : 1;
728 pmeGpu->common->epsilon_r = pme->epsilon_r;
729 pmeGpu->common->ewaldcoeff_q = pme->ewaldcoeff_q;
730 pmeGpu->common->nk[XX] = pme->nkx;
731 pmeGpu->common->nk[YY] = pme->nky;
732 pmeGpu->common->nk[ZZ] = pme->nkz;
733 pmeGpu->common->pme_order = pme->pme_order;
734 if (pmeGpu->common->pme_order != c_pmeGpuOrder)
735 {
736 GMX_THROW(gmx::NotImplementedError("pme_order != 4 is not implemented!"));
737 }
738 for (int i = 0; i < DIM; i++)
739 {
740 pmeGpu->common->bsp_mod[i].assign(pme->bsp_mod[i], pme->bsp_mod[i] + pmeGpu->common->nk[i]);
741 }
742 const int cellCount = c_pmeNeighborUnitcellCount;
743 pmeGpu->common->fsh.resize(0);
744 pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshx, pme->fshx + cellCount * pme->nkx);
745 pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshy, pme->fshy + cellCount * pme->nky);
746 pmeGpu->common->fsh.insert(pmeGpu->common->fsh.end(), pme->fshz, pme->fshz + cellCount * pme->nkz);
747 pmeGpu->common->nn.resize(0);
748 pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnx, pme->nnx + cellCount * pme->nkx);
749 pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nny, pme->nny + cellCount * pme->nky);
750 pmeGpu->common->nn.insert(pmeGpu->common->nn.end(), pme->nnz, pme->nnz + cellCount * pme->nkz);
751 pmeGpu->common->runMode = pme->runMode;
752 pmeGpu->common->isRankPmeOnly = !pme->bPPnode;
753 pmeGpu->common->boxScaler = pme->boxScaler;
754 }
755
756 /*! \libinternal \brief
757 * Uses heuristics to select the best-performing PME spread and gather kernels.
758 *
759 * \param[in,out] pmeGpu The PME GPU structure.
760 */
761 static void pme_gpu_select_best_performing_pme_spreadgather_kernels(PmeGpu* pmeGpu)
762 {
763 if (GMX_GPU_CUDA && pmeGpu->kernelParams->atoms.nAtoms > c_pmeGpuPerformanceAtomLimit)
764 {
765 pmeGpu->settings.threadsPerAtom = ThreadsPerAtom::Order;
766 pmeGpu->settings.recalculateSplines = true;
767 }
768 else
769 {
770 pmeGpu->settings.threadsPerAtom = ThreadsPerAtom::OrderSquared;
771 pmeGpu->settings.recalculateSplines = false;
772 }
773 }
774
775
776 /*! \libinternal \brief
777 * Initializes the PME GPU data at the beginning of the run.
778 * TODO: this should become PmeGpu::PmeGpu()
779 *
780 * \param[in,out] pme The PME structure.
781 * \param[in] deviceContext The GPU context.
782 * \param[in] deviceStream The GPU stream.
783 * \param[in,out] pmeGpuProgram The handle to the program/kernel data created outside (e.g. in unit tests/runner)
784 */
785 static void pme_gpu_init(gmx_pme_t* pme,
786 const DeviceContext& deviceContext,
787 const DeviceStream& deviceStream,
788 const PmeGpuProgram* pmeGpuProgram)
789 {
790 pme->gpu = new PmeGpu();
791 PmeGpu* pmeGpu = pme->gpu;
792 changePinningPolicy(&pmeGpu->staging.h_forces, pme_get_pinning_policy());
793 pmeGpu->common = std::make_shared<PmeShared>();
794
795 /* These settings are set here for the whole run; dynamic ones are set in pme_gpu_reinit() */
796 /* A convenience variable. */
797 pmeGpu->settings.useDecomposition = (pme->nnodes != 1);
798 /* TODO: CPU gather with GPU spread is broken due to different theta/dtheta layout. */
799 pmeGpu->settings.performGPUGather = true;
800 // By default GPU-side reduction is off (explicitly set here for tests, otherwise reset per-step)
801 pmeGpu->settings.useGpuForceReduction = false;
802
803 pme_gpu_set_testing(pmeGpu, false);
804
805 GMX_ASSERT(pmeGpuProgram != nullptr, "GPU kernels must be already compiled");
806 pmeGpu->programHandle_ = pmeGpuProgram;
807
808 pmeGpu->initializedClfftLibrary_ = std::make_unique<gmx::ClfftInitializer>();
809
810 pme_gpu_init_internal(pmeGpu, deviceContext, deviceStream);
811
812 pme_gpu_copy_common_data_from(pme);
813 pme_gpu_alloc_energy_virial(pmeGpu);
814
815 GMX_ASSERT(pmeGpu->common->epsilon_r != 0.0F, "PME GPU: bad electrostatic coefficient");
816
817 auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
818 kernelParamsPtr->constants.elFactor = ONE_4PI_EPS0 / pmeGpu->common->epsilon_r;
819 }
820
821 void pme_gpu_get_real_grid_sizes(const PmeGpu* pmeGpu, gmx::IVec* gridSize, gmx::IVec* paddedGridSize)
822 {
823 GMX_ASSERT(gridSize != nullptr, "");
824 GMX_ASSERT(paddedGridSize != nullptr, "");
825 GMX_ASSERT(pmeGpu != nullptr, "");
826 auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
827 for (int i = 0; i < DIM; i++)
828 {
829 (*gridSize)[i] = kernelParamsPtr->grid.realGridSize[i];
830 (*paddedGridSize)[i] = kernelParamsPtr->grid.realGridSizePadded[i];
831 }
832 }
833
834 void pme_gpu_reinit(gmx_pme_t* pme,
835 const DeviceContext* deviceContext,
836 const DeviceStream* deviceStream,
837 const PmeGpuProgram* pmeGpuProgram)
838 {
839 GMX_ASSERT(pme != nullptr, "Need valid PME object");
840
841 if (!pme->gpu)
842 {
843 GMX_RELEASE_ASSERT(deviceContext != nullptr,
844 "Device context can not be nullptr when setting up PME on GPU.");
845 GMX_RELEASE_ASSERT(deviceStream != nullptr,
846 "Device stream can not be nullptr when setting up PME on GPU.");
847 /* First-time initialization */
848 pme_gpu_init(pme, *deviceContext, *deviceStream, pmeGpuProgram);
849 }
850 else
851 {
852 /* After this call nothing in the GPU code should refer to the gmx_pme_t *pme itself - until the next pme_gpu_reinit */
853 pme_gpu_copy_common_data_from(pme);
854 }
855 /* GPU FFT will only get used for a single rank.*/
856 pme->gpu->settings.performGPUFFT =
857 (pme->gpu->common->runMode == PmeRunMode::GPU) && !pme->gpu->settings.useDecomposition;
858 pme->gpu->settings.performGPUSolve = (pme->gpu->common->runMode == PmeRunMode::GPU);
859
860 /* Reinit active timers */
861 pme_gpu_reinit_timings(pme->gpu);
862
863 pme_gpu_reinit_grids(pme->gpu);
864 // Note: if timing the reinit launch overhead becomes more relevant
865 // (e.g. with regular PP-PME re-balancing), we should pass wcycle here.
866 pme_gpu_reinit_computation(pme, nullptr);
867 /* Clear the previous box - doesn't hurt, and forces the PME CPU recipbox
868 * update for mixed mode on grid switch. TODO: use shared recipbox field.
869 */
870 std::memset(pme->gpu->common->previousBox, 0, sizeof(pme->gpu->common->previousBox));
871 }
872
873 void pme_gpu_destroy(PmeGpu* pmeGpu)
874 {
875 /* Free lots of data */
876 pme_gpu_free_energy_virial(pmeGpu);
877 pme_gpu_free_bspline_values(pmeGpu);
878 pme_gpu_free_forces(pmeGpu);
879 pme_gpu_free_coefficients(pmeGpu);
880 pme_gpu_free_spline_data(pmeGpu);
881 pme_gpu_free_grid_indices(pmeGpu);
882 pme_gpu_free_fract_shifts(pmeGpu);
883 pme_gpu_free_grids(pmeGpu);
884
885 pme_gpu_destroy_3dfft(pmeGpu);
886
887 delete pmeGpu;
888 }
889
890 void pme_gpu_reinit_atoms(PmeGpu* pmeGpu, const int nAtoms, const real* chargesA, const real* chargesB)
891 {
892 auto* kernelParamsPtr = pme_gpu_get_kernel_params_base_ptr(pmeGpu);
893 kernelParamsPtr->atoms.nAtoms = nAtoms;
894 const int block_size = pme_gpu_get_atom_data_block_size();
895 const int nAtomsNewPadded = ((nAtoms + block_size - 1) / block_size) * block_size;
896 const bool haveToRealloc = (pmeGpu->nAtomsAlloc < nAtomsNewPadded);
897 pmeGpu->nAtomsAlloc = nAtomsNewPadded;
898
899 #if GMX_DOUBLE
900 GMX_RELEASE_ASSERT(false, "Only single precision supported");
901 GMX_UNUSED_VALUE(chargesA); GMX_UNUSED_VALUE(chargesB);
902 #else
903 int gridIndex = 0;
904 /* Could also be checked for haveToRealloc, but the copy always needs to be performed */
905 pme_gpu_realloc_and_copy_input_coefficients(pmeGpu, reinterpret_cast<const float*>(chargesA), gridIndex);
906 gridIndex++;
907 if (chargesB != nullptr)
908 {
909 pme_gpu_realloc_and_copy_input_coefficients(
910 pmeGpu, reinterpret_cast<const float*>(chargesB), gridIndex);
911 }
912 else
913 {
914 /* Fill the second set of coefficients with chargesA as well to be able to avoid
915 * conditionals in the GPU kernels */
916 /* FIXME: This should be avoided by making a separate templated version of the
917 * relevant kernel(s) (probably only pme_gather_kernel). That would require a
918 * reduction of the current number of templated parameters of that kernel. */
919 pme_gpu_realloc_and_copy_input_coefficients(
920 pmeGpu, reinterpret_cast<const float*>(chargesA), gridIndex);
921 }
922 #endif
923
924 if (haveToRealloc)
925 {
926 pme_gpu_realloc_forces(pmeGpu);
927 pme_gpu_realloc_spline_data(pmeGpu);
928 pme_gpu_realloc_grid_indices(pmeGpu);
929 }
930 pme_gpu_select_best_performing_pme_spreadgather_kernels(pmeGpu);
931 }
932
933 /*! \internal \brief
934 * Returns raw timing event from the corresponding GpuRegionTimer (if timings are enabled).
935 * In CUDA the result can be a nullptr stub, as per the GpuRegionTimer implementation.
936 *
937 * \param[in] pmeGpu The PME GPU data structure.
938 * \param[in] PMEStageId The PME GPU stage gtPME_ index from the enum in src/gromacs/timing/gpu_timing.h
939 */
940 static CommandEvent* pme_gpu_fetch_timing_event(const PmeGpu* pmeGpu, size_t PMEStageId)
941 {
942 CommandEvent* timingEvent = nullptr;
943 if (pme_gpu_timings_enabled(pmeGpu))
944 {
945 GMX_ASSERT(PMEStageId < pmeGpu->archSpecific->timingEvents.size(),
946 "Wrong PME GPU timing event index");
947 timingEvent = pmeGpu->archSpecific->timingEvents[PMEStageId].fetchNextEvent();
948 }
949 return timingEvent;
950 }
951
952 void pme_gpu_3dfft(const PmeGpu* pmeGpu, gmx_fft_direction dir, const int grid_index)
953 {
954 int timerId = (dir == GMX_FFT_REAL_TO_COMPLEX) ? gtPME_FFT_R2C : gtPME_FFT_C2R;
955
956 pme_gpu_start_timing(pmeGpu, timerId);
957 pmeGpu->archSpecific->fftSetup[grid_index]->perform3dFft(
958 dir, pme_gpu_fetch_timing_event(pmeGpu, timerId));
959 pme_gpu_stop_timing(pmeGpu, timerId);
960 }
961
962 /*! \brief
963 * Given possibly large \p blockCount, returns a compact 1D or 2D grid for kernel scheduling,
964 * to minimize number of unused blocks.
965 */
966 std::pair<int, int> inline pmeGpuCreateGrid(const PmeGpu* pmeGpu, int blockCount)
967 {
968 // How many maximum widths in X do we need (hopefully just one)
969 const int minRowCount = (blockCount + pmeGpu->maxGridWidthX - 1) / pmeGpu->maxGridWidthX;
970 // Trying to make things even
971 const int colCount = (blockCount + minRowCount - 1) / minRowCount;
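// Example: blockCount = 100000 with maxGridWidthX = 65535 gives minRowCount = 2 and
// colCount = 50000, i.e. a 50000 x 2 launch grid with no unused blocks in this case.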
972 GMX_ASSERT((colCount * minRowCount - blockCount) >= 0, "pmeGpuCreateGrid: totally wrong");
973 GMX_ASSERT((colCount * minRowCount - blockCount) < minRowCount,
974 "pmeGpuCreateGrid: excessive blocks");
975 return std::pair<int, int>(colCount, minRowCount);
976 }
977
978 /*! \brief
979 * Returns a pointer to appropriate spline and spread kernel based on the input bool values
980 *
981 * \param[in] pmeGpu The PME GPU structure.
982 * \param[in] threadsPerAtom Controls whether we should use order or order*order threads per atom
983 * \param[in] writeSplinesToGlobal bool controlling if we should write spline data to global memory
984 * \param[in] numGrids Number of grids to use. numGrids == 2 if Coulomb is perturbed.
985 *
986 * \return Pointer to CUDA kernel
987 */
988 static auto selectSplineAndSpreadKernelPtr(const PmeGpu* pmeGpu,
989 ThreadsPerAtom threadsPerAtom,
990 bool writeSplinesToGlobal,
991 const int numGrids)
992 {
993 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
994 if (writeSplinesToGlobal)
995 {
996 if (threadsPerAtom == ThreadsPerAtom::Order)
997 {
998 if (numGrids == 2)
999 {
1000 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4Dual;
1001 }
1002 else
1003 {
1004 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesThPerAtom4Single;
1005 }
1006 }
1007 else
1008 {
1009 if (numGrids == 2)
1010 {
1011 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesDual;
1012 }
1013 else
1014 {
1015 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelWriteSplinesSingle;
1016 }
1017 }
1018 }
1019 else
1020 {
1021 if (threadsPerAtom == ThreadsPerAtom::Order)
1022 {
1023 if (numGrids == 2)
1024 {
1025 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Dual;
1026 }
1027 else
1028 {
1029 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Single;
1030 }
1031 }
1032 else
1033 {
1034 if (numGrids == 2)
1035 {
1036 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelDual;
1037 }
1038 else
1039 {
1040 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelSingle;
1041 }
1042 }
1043 }
1044
1045 return kernelPtr;
1046 }
1047
1048 /*! \brief
1049 * Returns a pointer to appropriate spline kernel based on the input bool values
1050 *
1051 * \param[in] pmeGpu The PME GPU structure.
1052 * \param[in] threadsPerAtom Controls whether we should use order or order*order threads per atom
1053 * \param[in] writeSplinesToGlobal bool controlling if we should write spline data to global memory
1054 * \param[in] numGrids Number of grids to use. numGrids == 2 if Coulomb is perturbed.
1055 *
1056 * \return Pointer to CUDA kernel
1057 */
1058 static auto selectSplineKernelPtr(const PmeGpu* pmeGpu,
1059 ThreadsPerAtom threadsPerAtom,
1060 bool gmx_unused writeSplinesToGlobal,
1061 const int numGrids)
1062 {
1063 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1064 GMX_ASSERT(
1065 writeSplinesToGlobal,
1066 "Spline data should always be written to global memory when just calculating splines");
1067
1068 if (threadsPerAtom == ThreadsPerAtom::Order)
1069 {
1070 if (numGrids == 2)
1071 {
1072 kernelPtr = pmeGpu->programHandle_->impl_->splineKernelThPerAtom4Dual;
1073 }
1074 else
1075 {
1076 kernelPtr = pmeGpu->programHandle_->impl_->splineKernelThPerAtom4Single;
1077 }
1078 }
1079 else
1080 {
1081 if (numGrids == 2)
1082 {
1083 kernelPtr = pmeGpu->programHandle_->impl_->splineKernelDual;
1084 }
1085 else
1086 {
1087 kernelPtr = pmeGpu->programHandle_->impl_->splineKernelSingle;
1088 }
1089 }
1090 return kernelPtr;
1091 }
1092
1093 /*! \brief
1094 * Returns a pointer to appropriate spread kernel based on the input bool values
1095 *
1096 * \param[in] pmeGpu The PME GPU structure.
1097 * \param[in] threadsPerAtom Controls whether we should use order or order*order threads per atom
1098 * \param[in] writeSplinesToGlobal bool controlling if we should write spline data to global memory
1099 * \param[in] numGrids Number of grids to use. numGrids == 2 if Coulomb is perturbed.
1100 *
1101 * \return Pointer to CUDA kernel
1102 */
1103 static auto selectSpreadKernelPtr(const PmeGpu* pmeGpu,
1104 ThreadsPerAtom threadsPerAtom,
1105 bool writeSplinesToGlobal,
1106 const int numGrids)
1107 {
1108 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1109 if (writeSplinesToGlobal)
1110 {
1111 if (threadsPerAtom == ThreadsPerAtom::Order)
1112 {
1113 if (numGrids == 2)
1114 {
1115 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4Dual;
1116 }
1117 else
1118 {
1119 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelThPerAtom4Single;
1120 }
1121 }
1122 else
1123 {
1124 if (numGrids == 2)
1125 {
1126 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelDual;
1127 }
1128 else
1129 {
1130 kernelPtr = pmeGpu->programHandle_->impl_->spreadKernelSingle;
1131 }
1132 }
1133 }
1134 else
1135 {
1136 /* If we are not saving the spline data, we need to recalculate it
1137 using the combined spline and spread kernel */
1138 if (threadsPerAtom == ThreadsPerAtom::Order)
1139 {
1140 if (numGrids == 2)
1141 {
1142 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Dual;
1143 }
1144 else
1145 {
1146 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelThPerAtom4Single;
1147 }
1148 }
1149 else
1150 {
1151 if (numGrids == 2)
1152 {
1153 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelDual;
1154 }
1155 else
1156 {
1157 kernelPtr = pmeGpu->programHandle_->impl_->splineAndSpreadKernelSingle;
1158 }
1159 }
1160 }
1161 return kernelPtr;
1162 }
1163
1164 void pme_gpu_spread(const PmeGpu* pmeGpu,
1165 GpuEventSynchronizer* xReadyOnDevice,
1166 real** h_grids,
1167 bool computeSplines,
1168 bool spreadCharges,
1169 const real lambda)
1170 {
1171 GMX_ASSERT(
1172 pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
1173 "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
1174
1175 GMX_ASSERT(computeSplines || spreadCharges,
1176 "PME spline/spread kernel has invalid input (nothing to do)");
1177 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1178 GMX_ASSERT(kernelParamsPtr->atoms.nAtoms > 0, "No atom data in PME GPU spread");
1179
1180 const size_t blockSize = pmeGpu->programHandle_->impl_->spreadWorkGroupSize;
1181
1182 const int order = pmeGpu->common->pme_order;
1183 GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
1184 const bool writeGlobal = pmeGpu->settings.copyAllOutputs;
1185 const int threadsPerAtom =
1186 (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
1187 const bool recalculateSplines = pmeGpu->settings.recalculateSplines;
1188
1189 GMX_ASSERT(!GMX_GPU_OPENCL || pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
1190 "Only 16 threads per atom supported in OpenCL");
1191 GMX_ASSERT(!GMX_GPU_OPENCL || !recalculateSplines,
1192 "Recalculating splines not supported in OpenCL");
1193
1194 const int atomsPerBlock = blockSize / threadsPerAtom;
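// Illustrative only: with a hypothetical work-group size of 256 threads and order * order = 16
// threads per atom, each block would spread 16 atoms; with order = 4 threads per atom it would
// spread 64 atoms.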
1195
1196 // TODO: pick smaller block size in runtime if needed
1197 // (e.g. on 660 Ti where 50% occupancy is ~25% faster than 100% occupancy with RNAse (~17.8k atoms))
1198 // If doing so, change atomsPerBlock in the kernels as well.
1199 // TODO: test varying block sizes on modern arch-s as well
1200 // TODO: also consider using cudaFuncSetCacheConfig() for preferring shared memory on older architectures
1201 //(for spline data mostly)
1202 GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
1203 "inconsistent atom data padding vs. spreading block size");
1204
1205 // Ensure that coordinates are ready on the device before launching spread;
1206 // only needed with CUDA on PP+PME ranks, not on separate PME ranks, in unit tests
1207 // nor in OpenCL as these cases use a single stream (hence xReadyOnDevice == nullptr).
1208 GMX_ASSERT(!GMX_GPU_CUDA || xReadyOnDevice != nullptr || pmeGpu->common->isRankPmeOnly
1209 || pme_gpu_settings(pmeGpu).copyAllOutputs,
1210 "Need a valid coordinate synchronizer on PP+PME ranks with CUDA.");
1211
1212 if (xReadyOnDevice)
1213 {
1214 xReadyOnDevice->enqueueWaitEvent(pmeGpu->archSpecific->pmeStream_);
1215 }
1216
1217 const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
1218 auto dimGrid = pmeGpuCreateGrid(pmeGpu, blockCount);
1219
1220 if (pmeGpu->common->ngrids == 1)
1221 {
1222 kernelParamsPtr->current.scale = 1.0;
1223 }
1224 else
1225 {
1226 kernelParamsPtr->current.scale = 1.0 - lambda;
1227 }
1228
1229 KernelLaunchConfig config;
1230 config.blockSize[0] = order;
1231 config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
1232 config.blockSize[2] = atomsPerBlock;
1233 config.gridSize[0] = dimGrid.first;
1234 config.gridSize[1] = dimGrid.second;
1235
1236 int timingId;
1237 PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
1238 if (computeSplines)
1239 {
1240 if (spreadCharges)
1241 {
1242 timingId = gtPME_SPLINEANDSPREAD;
1243 kernelPtr = selectSplineAndSpreadKernelPtr(pmeGpu, pmeGpu->settings.threadsPerAtom,
1244 writeGlobal || (!recalculateSplines),
1245 pmeGpu->common->ngrids);
1246 }
1247 else
1248 {
1249 timingId = gtPME_SPLINE;
1250 kernelPtr = selectSplineKernelPtr(pmeGpu, pmeGpu->settings.threadsPerAtom,
1251 writeGlobal || (!recalculateSplines),
1252 pmeGpu->common->ngrids);
1253 }
1254 }
1255 else
1256 {
1257 timingId = gtPME_SPREAD;
1258 kernelPtr = selectSpreadKernelPtr(pmeGpu, pmeGpu->settings.threadsPerAtom,
1259 writeGlobal || (!recalculateSplines), pmeGpu->common->ngrids);
1260 }
1261
1262
1263 pme_gpu_start_timing(pmeGpu, timingId);
1264 auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
1265 #if c_canEmbedBuffers
1266 const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
1267 #else
1268 const auto kernelArgs = prepareGpuKernelArguments(
1269 kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->atoms.d_theta,
1270 &kernelParamsPtr->atoms.d_dtheta, &kernelParamsPtr->atoms.d_gridlineIndices,
1271 &kernelParamsPtr->grid.d_realGrid[FEP_STATE_A], &kernelParamsPtr->grid.d_realGrid[FEP_STATE_B],
1272 &kernelParamsPtr->grid.d_fractShiftsTable, &kernelParamsPtr->grid.d_gridlineIndicesTable,
1273 &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_A],
1274 &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_B], &kernelParamsPtr->atoms.d_coordinates);
1275 #endif
1276
1277 launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent,
1278 "PME spline/spread", kernelArgs);
1279 pme_gpu_stop_timing(pmeGpu, timingId);
1280
1281 const auto& settings = pmeGpu->settings;
1282 const bool copyBackGrid = spreadCharges && (!settings.performGPUFFT || settings.copyAllOutputs);
1283 if (copyBackGrid)
1284 {
1285 for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
1286 {
1287 float* h_grid = h_grids[gridIndex];
1288 pme_gpu_copy_output_spread_grid(pmeGpu, h_grid, gridIndex);
1289 }
1290 }
1291 const bool copyBackAtomData =
1292 computeSplines && (!settings.performGPUGather || settings.copyAllOutputs);
1293 if (copyBackAtomData)
1294 {
1295 pme_gpu_copy_output_spread_atom_data(pmeGpu);
1296 }
1297 }
1298
1299 void pme_gpu_solve(const PmeGpu* pmeGpu,
1300 const int gridIndex,
1301 t_complex* h_grid,
1302 GridOrdering gridOrdering,
1303 bool computeEnergyAndVirial)
1304 {
1305 GMX_ASSERT(
1306 pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
1307 "Only one (normal Coulomb PME) or two (FEP coulomb PME) PME grids can be used on GPU");
1308 GMX_ASSERT(gridIndex < pmeGpu->common->ngrids,
1309 "Invalid combination of gridIndex and number of grids");
1310
1311 const auto& settings = pmeGpu->settings;
1312 const bool copyInputAndOutputGrid = !settings.performGPUFFT || settings.copyAllOutputs;
1313
1314 auto* kernelParamsPtr = pmeGpu->kernelParams.get();
1315
1316 float* h_gridFloat = reinterpret_cast<float*>(h_grid);
1317 if (copyInputAndOutputGrid)
1318 {
1319 copyToDeviceBuffer(&kernelParamsPtr->grid.d_fourierGrid[gridIndex], h_gridFloat, 0,
1320 pmeGpu->archSpecific->complexGridSize[gridIndex],
1321 pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
1322 }
1323
1324 int majorDim = -1, middleDim = -1, minorDim = -1;
1325 switch (gridOrdering)
1326 {
1327 case GridOrdering::YZX:
1328 majorDim = YY;
1329 middleDim = ZZ;
1330 minorDim = XX;
1331 break;
1332
1333 case GridOrdering::XYZ:
1334 majorDim = XX;
1335 middleDim = YY;
1336 minorDim = ZZ;
1337 break;
1338
1339 default: GMX_ASSERT(false, "Implement grid ordering here and below for the kernel launch");
1340 }
1341
1342 const int maxBlockSize = pmeGpu->programHandle_->impl_->solveMaxWorkGroupSize;
1343
    const int gridLineSize      = pmeGpu->kernelParams->grid.complexGridSize[minorDim];
    const int gridLinesPerBlock = std::max(maxBlockSize / gridLineSize, 1);
    const int blocksPerGridLine = (gridLineSize + maxBlockSize - 1) / maxBlockSize;
    int       cellsPerBlock;
    if (blocksPerGridLine == 1)
    {
        cellsPerBlock = gridLineSize * gridLinesPerBlock;
    }
    else
    {
        cellsPerBlock = (gridLineSize + blocksPerGridLine - 1) / blocksPerGridLine;
    }
    const int warpSize  = pmeGpu->programHandle_->warpSize();
    const int blockSize = (cellsPerBlock + warpSize - 1) / warpSize * warpSize;

    static_assert(!GMX_GPU_CUDA || c_solveMaxWarpsPerBlock / 2 >= 4,
                  "The CUDA solve energy kernels need at least 4 warps. "
                  "Here we launch at least half of the max warps.");

    KernelLaunchConfig config;
    config.blockSize[0] = blockSize;
    config.gridSize[0]  = blocksPerGridLine;
    // rounding up to full warps so that shuffle operations produce defined results
    config.gridSize[1] = (pmeGpu->kernelParams->grid.complexGridSize[middleDim] + gridLinesPerBlock - 1)
                         / gridLinesPerBlock;
    config.gridSize[2] = pmeGpu->kernelParams->grid.complexGridSize[majorDim];

    int                                timingId  = gtPME_SOLVE;
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;
    if (gridOrdering == GridOrdering::YZX)
    {
        if (gridIndex == 0)
        {
            kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernelA
                                               : pmeGpu->programHandle_->impl_->solveYZXKernelA;
        }
        else
        {
            kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveYZXEnergyKernelB
                                               : pmeGpu->programHandle_->impl_->solveYZXKernelB;
        }
    }
    else if (gridOrdering == GridOrdering::XYZ)
    {
        if (gridIndex == 0)
        {
            kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernelA
                                               : pmeGpu->programHandle_->impl_->solveXYZKernelA;
        }
        else
        {
            kernelPtr = computeEnergyAndVirial ? pmeGpu->programHandle_->impl_->solveXYZEnergyKernelB
                                               : pmeGpu->programHandle_->impl_->solveXYZKernelB;
        }
    }

    pme_gpu_start_timing(pmeGpu, timingId);
    auto* timingEvent = pme_gpu_fetch_timing_event(pmeGpu, timingId);
#if c_canEmbedBuffers
    const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
#else
    const auto kernelArgs = prepareGpuKernelArguments(
            kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->grid.d_splineModuli[gridIndex],
            &kernelParamsPtr->constants.d_virialAndEnergy[gridIndex],
            &kernelParamsPtr->grid.d_fourierGrid[gridIndex]);
#endif
    launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME solve",
                    kernelArgs);
    pme_gpu_stop_timing(pmeGpu, timingId);

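    /* The kernel reduces the virial and energy contributions of this grid; copy the compact
     * result (c_virialAndEnergyCount floats) to the host staging buffer. */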
    if (computeEnergyAndVirial)
    {
        copyFromDeviceBuffer(pmeGpu->staging.h_virialAndEnergy[gridIndex],
                             &kernelParamsPtr->constants.d_virialAndEnergy[gridIndex], 0,
                             c_virialAndEnergyCount, pmeGpu->archSpecific->pmeStream_,
                             pmeGpu->settings.transferKind, nullptr);
    }

    if (copyInputAndOutputGrid)
    {
        copyFromDeviceBuffer(h_gridFloat, &kernelParamsPtr->grid.d_fourierGrid[gridIndex], 0,
                             pmeGpu->archSpecific->complexGridSize[gridIndex],
                             pmeGpu->archSpecific->pmeStream_, pmeGpu->settings.transferKind, nullptr);
    }
}

/*! \brief
 * Returns a pointer to the appropriate gather kernel based on the input values.
 *
 * \param[in]  pmeGpu                The PME GPU structure.
 * \param[in]  threadsPerAtom        Controls whether we should use order or order*order threads per atom.
 * \param[in]  readSplinesFromGlobal bool controlling if we should read spline data from global memory.
 * \param[in]  numGrids              Number of grids to use. numGrids == 2 if Coulomb is perturbed.
 *
 * \return Pointer to the selected kernel.
 */
inline auto selectGatherKernelPtr(const PmeGpu*  pmeGpu,
                                  ThreadsPerAtom threadsPerAtom,
                                  bool           readSplinesFromGlobal,
                                  const int      numGrids)

{
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr = nullptr;

    if (readSplinesFromGlobal)
    {
        if (threadsPerAtom == ThreadsPerAtom::Order)
        {
            if (numGrids == 2)
            {
                kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4Dual;
            }
            else
            {
                kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesThPerAtom4Single;
            }
        }
        else
        {
            if (numGrids == 2)
            {
                kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesDual;
            }
            else
            {
                kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelReadSplinesSingle;
            }
        }
    }
    else
    {
        if (threadsPerAtom == ThreadsPerAtom::Order)
        {
            if (numGrids == 2)
            {
                kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4Dual;
            }
            else
            {
                kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelThPerAtom4Single;
            }
        }
        else
        {
            if (numGrids == 2)
            {
                kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelDual;
            }
            else
            {
                kernelPtr = pmeGpu->programHandle_->impl_->gatherKernelSingle;
            }
        }
    }
    return kernelPtr;
}

void pme_gpu_gather(PmeGpu* pmeGpu, real** h_grids, const float lambda)
{
    GMX_ASSERT(
            pmeGpu->common->ngrids == 1 || pmeGpu->common->ngrids == 2,
            "Only one (normal Coulomb PME) or two (FEP Coulomb PME) PME grids can be used on GPU");

    const auto& settings = pmeGpu->settings;

    if (!settings.performGPUFFT || settings.copyAllOutputs)
    {
        for (int gridIndex = 0; gridIndex < pmeGpu->common->ngrids; gridIndex++)
        {
            float* h_grid = const_cast<float*>(h_grids[gridIndex]);
            pme_gpu_copy_input_gather_grid(pmeGpu, h_grid, gridIndex);
        }
    }

    if (settings.copyAllOutputs)
    {
        pme_gpu_copy_input_gather_atom_data(pmeGpu);
    }

    /* Only set when running unit tests, where all outputs are copied back */
    const bool   readGlobal = pmeGpu->settings.copyAllOutputs;
    const size_t blockSize  = pmeGpu->programHandle_->impl_->gatherWorkGroupSize;
    const int    order      = pmeGpu->common->pme_order;
    GMX_ASSERT(order == c_pmeGpuOrder, "Only PME order 4 is implemented");
    const int threadsPerAtom =
            (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? order : order * order);
    const bool recalculateSplines = pmeGpu->settings.recalculateSplines;

    GMX_ASSERT(!GMX_GPU_OPENCL || pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::OrderSquared,
               "Only 16 threads per atom supported in OpenCL");
    GMX_ASSERT(!GMX_GPU_OPENCL || !recalculateSplines,
               "Recalculating splines not supported in OpenCL");

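    /* Each thread block gathers forces for atomsPerBlock atoms. Atom data is padded to a multiple
     * of c_pmeAtomDataBlockSize, which the assertion below requires to be divisible by
     * atomsPerBlock, so the block count comes out exact. */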
    const int atomsPerBlock = blockSize / threadsPerAtom;

    GMX_ASSERT(!(c_pmeAtomDataBlockSize % atomsPerBlock),
               "inconsistent atom data padding vs. gathering block size");

    const int blockCount = pmeGpu->nAtomsAlloc / atomsPerBlock;
    auto      dimGrid    = pmeGpuCreateGrid(pmeGpu, blockCount);

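    /* Launch configuration: the fastest block dimension runs over the spline order, the second is
     * 1 or order depending on the threads-per-atom mode, and the slowest runs over the atoms
     * handled by the block. */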
    KernelLaunchConfig config;
    config.blockSize[0] = order;
    config.blockSize[1] = (pmeGpu->settings.threadsPerAtom == ThreadsPerAtom::Order ? 1 : order);
    config.blockSize[2] = atomsPerBlock;
    config.gridSize[0]  = dimGrid.first;
    config.gridSize[1]  = dimGrid.second;

    // TODO test different cache configs

    int                                timingId = gtPME_GATHER;
    PmeGpuProgramImpl::PmeKernelHandle kernelPtr =
            selectGatherKernelPtr(pmeGpu, pmeGpu->settings.threadsPerAtom,
                                  readGlobal || (!recalculateSplines), pmeGpu->common->ngrids);
    // TODO design kernel selection getters and make PmeGpu a friend of PmeGpuProgramImpl

    pme_gpu_start_timing(pmeGpu, timingId);
    auto* timingEvent     = pme_gpu_fetch_timing_event(pmeGpu, timingId);
    auto* kernelParamsPtr = pmeGpu->kernelParams.get();
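    /* With a single grid the forces are gathered as they are. With two grids (perturbed Coulomb
     * interactions) the contributions of the two grids are mixed: the kernel receives the
     * state A weight (1 - lambda), the state B contribution being weighted by lambda. */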
    if (pmeGpu->common->ngrids == 1)
    {
        kernelParamsPtr->current.scale = 1.0;
    }
    else
    {
        kernelParamsPtr->current.scale = 1.0 - lambda;
    }

#if c_canEmbedBuffers
    const auto kernelArgs = prepareGpuKernelArguments(kernelPtr, config, kernelParamsPtr);
#else
    const auto kernelArgs = prepareGpuKernelArguments(
            kernelPtr, config, kernelParamsPtr, &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_A],
            &kernelParamsPtr->atoms.d_coefficients[FEP_STATE_B],
            &kernelParamsPtr->grid.d_realGrid[FEP_STATE_A], &kernelParamsPtr->grid.d_realGrid[FEP_STATE_B],
            &kernelParamsPtr->atoms.d_theta, &kernelParamsPtr->atoms.d_dtheta,
            &kernelParamsPtr->atoms.d_gridlineIndices, &kernelParamsPtr->atoms.d_forces);
#endif
    launchGpuKernel(kernelPtr, config, pmeGpu->archSpecific->pmeStream_, timingEvent, "PME gather",
                    kernelArgs);
    pme_gpu_stop_timing(pmeGpu, timingId);

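    /* When the force reduction runs on the GPU, only mark the event that signals that the PME
     * forces are ready; otherwise copy the forces back to the host here. */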
    if (pmeGpu->settings.useGpuForceReduction)
    {
        pmeGpu->archSpecific->pmeForcesReady.markEvent(pmeGpu->archSpecific->pmeStream_);
    }
    else
    {
        pme_gpu_copy_output_forces(pmeGpu);
    }
}

void* pme_gpu_get_kernelparam_forces(const PmeGpu* pmeGpu)
{
    if (pmeGpu && pmeGpu->kernelParams)
    {
        return pmeGpu->kernelParams->atoms.d_forces;
    }
    else
    {
        return nullptr;
    }
}

void pme_gpu_set_kernelparam_coordinates(const PmeGpu* pmeGpu, DeviceBuffer<gmx::RVec> d_x)
{
    GMX_ASSERT(pmeGpu && pmeGpu->kernelParams,
               "PME GPU device buffer can not be set in non-GPU builds or before the GPU PME was "
               "initialized.");

    GMX_ASSERT(checkDeviceBuffer(d_x, pmeGpu->kernelParams->atoms.nAtoms),
               "The device-side buffer can not be set.");

    pmeGpu->kernelParams->atoms.d_coordinates = d_x;
}

GpuEventSynchronizer* pme_gpu_get_forces_ready_synchronizer(const PmeGpu* pmeGpu)
{
    if (pmeGpu && pmeGpu->kernelParams)
    {
        return &pmeGpu->archSpecific->pmeForcesReady;
    }
    else
    {
        return nullptr;
    }
}