/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2018,2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */

/*! \internal \file
 * \brief Defines the host-side PME GPU data structure, which is dependent on the GPU types.
 * It's included by pointer in the general PmeGpu host structure in pme_gpu_types_host.h.
 *
 * \author Aleksei Iupinov <a.yupinov@gmail.com>
 * \ingroup module_ewald
 */

#ifndef PMEGPUTYPESHOSTIMPL_H
#define PMEGPUTYPESHOSTIMPL_H

#include "config.h"

#include <array>
#include <memory>
#include <set>
#include <vector>

#if GMX_GPU_CUDA
#    include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#    include "gromacs/gpu_utils/gpuregiontimer.cuh"
#elif GMX_GPU_OPENCL
#    include "gromacs/gpu_utils/gpueventsynchronizer_ocl.h"
#    include "gromacs/gpu_utils/gpuregiontimer_ocl.h"
#endif

#include "gromacs/timing/gpu_timing.h" // for gtPME_EVENT_COUNT

#include "pme_gpu_3dfft.h"

#ifndef NUMFEPSTATES
//! Number of FEP states.
#    define NUMFEPSTATES 2
#endif

class GpuParallel3dFft;

/*! \internal \brief
 * The main PME CUDA/OpenCL-specific host data structure, included in the PME GPU structure by the archSpecific pointer.
 */
struct PmeGpuSpecific
{
    /*! \brief Constructor
     *
     * \param[in] deviceContext  GPU device context
     * \param[in] pmeStream      GPU PME stream.
     */
    PmeGpuSpecific(const DeviceContext& deviceContext, const DeviceStream& pmeStream) :
        deviceContext_(deviceContext),
        pmeStream_(pmeStream)
    {
    }
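    /* Illustrative only: a minimal sketch of how higher-level PME GPU code
     * might create this struct (the owning pointer name is hypothetical):
     *
     *   archSpecific = std::make_unique<PmeGpuSpecific>(deviceContext, pmeStream);
     *
     * Only references are stored, so the context and stream passed in must
     * outlive this object.
     */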
    /*! \brief
     * A handle to the GPU context.
     * TODO: this is currently extracted from the implementation of pmeGpu->programHandle_,
     * but should be a constructor parameter to PmeGpu, as well as PmeGpuProgram,
     * managed by high-level code.
     */
    const DeviceContext& deviceContext_;

    /*! \brief The GPU stream where everything related to the PME happens. */
    const DeviceStream& pmeStream_;

    /* Synchronization events */
    /*! \brief Triggered after the PME force calculations have been completed */
    GpuEventSynchronizer pmeForcesReady;
    /*! \brief Triggered after the grid has been copied to the host (after the spreading stage). */
    GpuEventSynchronizer syncSpreadGridD2H;

    /* Settings which are set at the start of the run */
    /*! \brief A boolean which tells whether the complex and real grids for cu/clFFT are different or same. Currently true. */
    bool performOutOfPlaceFFT = false;
    /*! \brief A boolean which tells if the GPU timing events are enabled.
     * False by default, can be enabled by setting the environment variable GMX_ENABLE_GPU_TIMING.
     * Note: will not be reliable when multiple GPU tasks are running concurrently on the same
     * device context, as CUDA events on multiple streams are untrustworthy.
     */
    bool useTiming = false;

    //! Vector of FFT setups
    std::vector<std::unique_ptr<GpuParallel3dFft>> fftSetup;

    //! All the timers one might use
    std::array<GpuRegionTimer, gtPME_EVENT_COUNT> timingEvents;

    //! Indices of timingEvents actually used
    std::set<size_t> activeTimers;
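    /* Illustrative only: a minimal sketch of how a timed region is expected to
     * be driven, assuming the open/close API of GpuRegionTimer from the headers
     * included above (the real logic lives in the PME GPU timing code, not here):
     *
     *   timingEvents[timingId].openTimingRegion(pmeStream_);
     *   activeTimers.insert(timingId);
     *   // ... enqueue PME GPU work on pmeStream_ ...
     *   timingEvents[timingId].closeTimingRegion(pmeStream_);
     */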
    /* GPU array element counts (not the array sizes in bytes!).
     * They might be larger than the actual meaningful data sizes.
     * These are paired: the actual element count + the maximum element count that can fit in the current allocated memory.
     * These integer pairs are mostly meaningful for the reallocateDeviceBuffer calls
     * (see the illustrative sketch at the bottom of this file).
     * As such, if DeviceBuffer is refactored into a class, they can be freely changed, too.
     * The only exceptions are realGridSize and complexGridSize which are also used for grid clearing/copying.
     * TODO: these should live in a clean buffered container type, and be refactored in the NB/cudautils as well.
     */
    /*! \brief The kernelParams.atoms.coordinates float element count (actual) */
    int coordinatesSize = 0;
    /*! \brief The kernelParams.atoms.coordinates float element count (reserved) */
    int coordinatesSizeAlloc = 0;
    /*! \brief The kernelParams.atoms.forces float element count (actual) */
    int forcesSize = 0;
    /*! \brief The kernelParams.atoms.forces float element count (reserved) */
    int forcesSizeAlloc = 0;
    /*! \brief The kernelParams.atoms.gridlineIndices int element count (actual) */
    int gridlineIndicesSize = 0;
    /*! \brief The kernelParams.atoms.gridlineIndices int element count (reserved) */
    int gridlineIndicesSizeAlloc = 0;
    /*! \brief Both the kernelParams.atoms.theta and kernelParams.atoms.dtheta float element count (actual) */
    int splineDataSize = 0;
    /*! \brief Both the kernelParams.atoms.theta and kernelParams.atoms.dtheta float element count (reserved) */
    int splineDataSizeAlloc = 0;
    /*! \brief The kernelParams.atoms.coefficients float element count (actual) */
    int coefficientsSize[NUMFEPSTATES] = { 0, 0 };
    /*! \brief The kernelParams.atoms.coefficients float element count (reserved) */
    int coefficientsCapacity[NUMFEPSTATES] = { 0, 0 };
    /*! \brief The kernelParams.grid.splineValuesArray float element count (actual) */
    int splineValuesSize[NUMFEPSTATES] = { 0, 0 };
    /*! \brief The kernelParams.grid.splineValuesArray float element count (reserved) */
    int splineValuesCapacity[NUMFEPSTATES] = { 0, 0 };
    /*! \brief The kernelParams.grid.realGrid float element count (actual) */
    int realGridSize[NUMFEPSTATES] = { 0, 0 };
    /*! \brief The kernelParams.grid.realGrid float element count (reserved) */
    int realGridCapacity[NUMFEPSTATES] = { 0, 0 };
    /*! \brief The kernelParams.grid.fourierGrid float (not float2!) element count (actual) */
    int complexGridSize[NUMFEPSTATES] = { 0, 0 };
    /*! \brief The kernelParams.grid.fourierGrid float (not float2!) element count (reserved) */
    int complexGridCapacity[NUMFEPSTATES] = { 0, 0 };
};

#endif
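/* Illustrative only: a minimal sketch of how the paired actual-count/capacity
 * fields above are meant to be used, assuming the reallocateDeviceBuffer()
 * helper from gromacs/gpu_utils/devicebuffer.h (the buffer member name
 * d_coordinates and the count expression are illustrative; the real call sites
 * live in the PME GPU internals):
 *
 *   reallocateDeviceBuffer(&kernelParams->atoms.d_coordinates,
 *                          newAtomCount * DIM,
 *                          &archSpecific->coordinatesSize,
 *                          &archSpecific->coordinatesSizeAlloc,
 *                          archSpecific->deviceContext_);
 *
 * The device buffer only grows when the requested element count exceeds the
 * stored capacity; otherwise just the actual-count field is updated.
 */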