/*
 * This file is part of the GROMACS molecular simulation package.
 *
 * Copyright (c) 2019,2020, by the GROMACS development team, led by
 * Mark Abraham, David van der Spoel, Berk Hess, and Erik Lindahl,
 * and including many others, as listed in the AUTHORS file in the
 * top-level source directory and at http://www.gromacs.org.
 *
 * GROMACS is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public License
 * as published by the Free Software Foundation; either version 2.1
 * of the License, or (at your option) any later version.
 *
 * GROMACS is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with GROMACS; if not, see
 * http://www.gnu.org/licenses, or write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * If you want to redistribute modifications to GROMACS, please
 * consider that scientific software is very special. Version
 * control is crucial - bugs must be traceable. We will be happy to
 * consider code for inclusion in the official distribution, but
 * derived work must not be called official GROMACS. Details are found
 * in the README & COPYING files - if they are missing, get the
 * official version at http://www.gromacs.org.
 *
 * To help us fund GROMACS development, we humbly ask that you cite
 * the research papers on the package. Check out http://www.gromacs.org.
 */
/*! \internal \file
 *
 * \brief Implements class which receives coordinates into GPU memory on the PME task using CUDA
 *
 * \author Alan Gray <alang@nvidia.com>
 *
 * \ingroup module_ewald
 */
#include "gmxpre.h"

#include "pme_coordinate_receiver_gpu_impl.h"

#include "config.h"

#include "gromacs/ewald/pme_force_sender_gpu.h"
#include "gromacs/gpu_utils/cudautils.cuh"
#include "gromacs/gpu_utils/gpueventsynchronizer.cuh"
#include "gromacs/utility/gmxmpi.h"

namespace gmx
{

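/*! \brief Constructor; sizes the per-PP-rank MPI request and event-synchronizer vectors */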
PmeCoordinateReceiverGpu::Impl::Impl(const DeviceStream&    pmeStream,
                                     MPI_Comm               comm,
                                     gmx::ArrayRef<PpRanks> ppRanks) :
    pmeStream_(pmeStream),
    comm_(comm),
    ppRanks_(ppRanks)
{
    GMX_RELEASE_ASSERT(
            GMX_THREAD_MPI,
            "PME-PP GPU Communication is currently only supported with thread-MPI enabled");
    request_.resize(ppRanks.size());
    ppSync_.resize(ppRanks.size());
}

PmeCoordinateReceiverGpu::Impl::~Impl() = default;

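/*! \brief Send the address of each PP rank's section of the PME coordinate buffer, so coordinates can be pushed directly into PME GPU memory */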
void PmeCoordinateReceiverGpu::Impl::sendCoordinateBufferAddressToPpRanks(DeviceBuffer<RVec> d_x)
{

    int ind_start = 0;
    int ind_end   = 0;
    for (const auto& receiver : ppRanks_)
    {
        ind_start = ind_end;
        ind_end   = ind_start + receiver.numAtoms;

        // Data will be transferred directly from GPU.
        void* sendBuf = reinterpret_cast<void*>(&d_x[ind_start]);

#if GMX_MPI
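        // Send the address of this rank's section of the coordinate buffer to the PP rank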
        MPI_Send(&sendBuf, sizeof(void**), MPI_BYTE, receiver.rankId, 0, comm_);
#else
        GMX_UNUSED_VALUE(sendBuf);
#endif
    }
}

/*! \brief Receive coordinate data directly using CUDA memory copy */
void PmeCoordinateReceiverGpu::Impl::launchReceiveCoordinatesFromPpCudaDirect(int ppRank)
{
    // Data will be pushed directly from PP task

#if GMX_MPI
    // Receive event from PP task
    MPI_Irecv(&ppSync_[recvCount_], sizeof(GpuEventSynchronizer*), MPI_BYTE, ppRank, 0, comm_,
              &request_[recvCount_]);
    recvCount_++;
#else
    GMX_UNUSED_VALUE(ppRank);
#endif
}

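/*! \brief Wait for all outstanding event receives and make the PME stream wait on the received synchronization events */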
void PmeCoordinateReceiverGpu::Impl::enqueueWaitReceiveCoordinatesFromPpCudaDirect()
{
    if (recvCount_ > 0)
    {
        // ensure PME calculation doesn't commence until coordinate data has been transferred
#if GMX_MPI
        MPI_Waitall(recvCount_, request_.data(), MPI_STATUSES_IGNORE);
#endif
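        // Make the PME stream wait on each PP rank's coordinate-transfer event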
        for (int i = 0; i < recvCount_; i++)
        {
            ppSync_[i]->enqueueWaitEvent(pmeStream_);
        }
        // reset receive counter
        recvCount_ = 0;
    }
}

PmeCoordinateReceiverGpu::PmeCoordinateReceiverGpu(const DeviceStream&    pmeStream,
                                                   MPI_Comm               comm,
                                                   gmx::ArrayRef<PpRanks> ppRanks) :
    impl_(new Impl(pmeStream, comm, ppRanks))
{
}

PmeCoordinateReceiverGpu::~PmeCoordinateReceiverGpu() = default;

void PmeCoordinateReceiverGpu::sendCoordinateBufferAddressToPpRanks(DeviceBuffer<RVec> d_x)
{
    impl_->sendCoordinateBufferAddressToPpRanks(d_x);
}

void PmeCoordinateReceiverGpu::launchReceiveCoordinatesFromPpCudaDirect(int ppRank)
{
    impl_->launchReceiveCoordinatesFromPpCudaDirect(ppRank);
}

void PmeCoordinateReceiverGpu::enqueueWaitReceiveCoordinatesFromPpCudaDirect()
{
    impl_->enqueueWaitReceiveCoordinatesFromPpCudaDirect();
}

} // namespace gmx