1 /*
2  * Copyright (C) by Argonne National Laboratory
3  *     See COPYRIGHT in top-level directory
4  */
5 
6 #include "mpiimpl.h"
7 
8 /*
9 === BEGIN_MPI_T_CVAR_INFO_BLOCK ===
10 
11 cvars:
12     - name        : MPIR_CVAR_NEIGHBOR_ALLGATHERV_INTRA_ALGORITHM
13       category    : COLLECTIVE
14       type        : enum
15       default     : auto
16       class       : none
17       verbosity   : MPI_T_VERBOSITY_USER_BASIC
18       scope       : MPI_T_SCOPE_ALL_EQ
19       description : |-
20         Variable to select neighbor_allgatherv algorithm
21         auto - Internal algorithm selection (can be overridden with MPIR_CVAR_COLL_SELECTION_TUNING_JSON_FILE)
22         nb   - Force nb algorithm
23 
24     - name        : MPIR_CVAR_NEIGHBOR_ALLGATHERV_INTER_ALGORITHM
25       category    : COLLECTIVE
26       type        : enum
27       default     : auto
28       class       : none
29       verbosity   : MPI_T_VERBOSITY_USER_BASIC
30       scope       : MPI_T_SCOPE_ALL_EQ
31       description : |-
32         Variable to select neighbor_allgatherv algorithm
33         auto - Internal algorithm selection (can be overridden with MPIR_CVAR_COLL_SELECTION_TUNING_JSON_FILE)
34         nb   - Force nb algorithm
35 
36     - name        : MPIR_CVAR_NEIGHBOR_ALLGATHERV_DEVICE_COLLECTIVE
37       category    : COLLECTIVE
38       type        : boolean
39       default     : true
40       class       : none
41       verbosity   : MPI_T_VERBOSITY_USER_BASIC
42       scope       : MPI_T_SCOPE_ALL_EQ
43       description : >-
44         This CVAR is only used when MPIR_CVAR_DEVICE_COLLECTIVES
45         is set to "percoll".  If set to true, MPI_Neighbor_allgatherv will
46         allow the device to override the MPIR-level collective
47         algorithms.  The device might still call the MPIR-level
48         algorithms manually.  If set to false, the device-override
49         will be disabled.
50 
51 === END_MPI_T_CVAR_INFO_BLOCK ===
52 */
53 
54 /* -- Begin Profiling Symbol Block for routine MPI_Neighbor_allgatherv */
55 #if defined(HAVE_PRAGMA_WEAK)
56 #pragma weak MPI_Neighbor_allgatherv = PMPI_Neighbor_allgatherv
57 #elif defined(HAVE_PRAGMA_HP_SEC_DEF)
58 #pragma _HP_SECONDARY_DEF PMPI_Neighbor_allgatherv  MPI_Neighbor_allgatherv
59 #elif defined(HAVE_PRAGMA_CRI_DUP)
60 #pragma _CRI duplicate MPI_Neighbor_allgatherv as PMPI_Neighbor_allgatherv
61 #elif defined(HAVE_WEAK_ATTRIBUTE)
62 int MPI_Neighbor_allgatherv(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
63                             void *recvbuf, const int recvcounts[], const int displs[],
64                             MPI_Datatype recvtype, MPI_Comm comm)
65     __attribute__ ((weak, alias("PMPI_Neighbor_allgatherv")));
66 #endif
67 /* -- End Profiling Symbol Block */
68 
69 /* Define MPICH_MPI_FROM_PMPI if weak symbols are not supported to build
70    the MPI routines */
71 #ifndef MPICH_MPI_FROM_PMPI
72 #undef MPI_Neighbor_allgatherv
73 #define MPI_Neighbor_allgatherv PMPI_Neighbor_allgatherv
74 
75 /* any non-MPI functions go here, especially non-static ones */
76 
77 
MPIR_Neighbor_allgatherv_allcomm_auto(const void * sendbuf,int sendcount,MPI_Datatype sendtype,void * recvbuf,const int recvcounts[],const int displs[],MPI_Datatype recvtype,MPIR_Comm * comm_ptr)78 int MPIR_Neighbor_allgatherv_allcomm_auto(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
79                                           void *recvbuf, const int recvcounts[], const int displs[],
80                                           MPI_Datatype recvtype, MPIR_Comm * comm_ptr)
81 {
82     int mpi_errno = MPI_SUCCESS;
83 
84     MPIR_Csel_coll_sig_s coll_sig = {
85         .coll_type = MPIR_CSEL_COLL_TYPE__NEIGHBOR_ALLGATHERV,
86         .comm_ptr = comm_ptr,
87 
88         .u.neighbor_allgatherv.sendbuf = sendbuf,
89         .u.neighbor_allgatherv.sendcount = sendcount,
90         .u.neighbor_allgatherv.sendtype = sendtype,
91         .u.neighbor_allgatherv.recvbuf = recvbuf,
92         .u.neighbor_allgatherv.recvcounts = recvcounts,
93         .u.neighbor_allgatherv.displs = displs,
94         .u.neighbor_allgatherv.recvtype = recvtype,
95     };
96 
97     MPII_Csel_container_s *cnt = MPIR_Csel_search(comm_ptr->csel_comm, coll_sig);
98     MPIR_Assert(cnt);
99 
100     switch (cnt->id) {
101         case MPII_CSEL_CONTAINER_TYPE__ALGORITHM__MPIR_Neighbor_allgatherv_allcomm_nb:
102             mpi_errno =
103                 MPIR_Neighbor_allgatherv_allcomm_nb(sendbuf, sendcount, sendtype, recvbuf,
104                                                     recvcounts, displs, recvtype, comm_ptr);
105             break;
106 
107         default:
108             MPIR_Assert(0);
109     }
110 
111     return mpi_errno;
112 }
113 
MPIR_Neighbor_allgatherv_impl(const void * sendbuf,int sendcount,MPI_Datatype sendtype,void * recvbuf,const int recvcounts[],const int displs[],MPI_Datatype recvtype,MPIR_Comm * comm_ptr)114 int MPIR_Neighbor_allgatherv_impl(const void *sendbuf, int sendcount,
115                                   MPI_Datatype sendtype, void *recvbuf,
116                                   const int recvcounts[], const int displs[],
117                                   MPI_Datatype recvtype, MPIR_Comm * comm_ptr)
118 {
119     int mpi_errno = MPI_SUCCESS;
120 
121     if (comm_ptr->comm_kind == MPIR_COMM_KIND__INTRACOMM) {
122         switch (MPIR_CVAR_NEIGHBOR_ALLGATHERV_INTRA_ALGORITHM) {
123             case MPIR_CVAR_NEIGHBOR_ALLGATHERV_INTRA_ALGORITHM_nb:
124                 mpi_errno = MPIR_Neighbor_allgatherv_allcomm_nb(sendbuf, sendcount, sendtype,
125                                                                 recvbuf, recvcounts, displs,
126                                                                 recvtype, comm_ptr);
127                 break;
128             case MPIR_CVAR_NEIGHBOR_ALLGATHERV_INTRA_ALGORITHM_auto:
129                 mpi_errno = MPIR_Neighbor_allgatherv_allcomm_auto(sendbuf, sendcount, sendtype,
130                                                                   recvbuf, recvcounts, displs,
131                                                                   recvtype, comm_ptr);
132                 break;
133             default:
134                 MPIR_Assert(0);
135         }
136     } else {
137         switch (MPIR_CVAR_NEIGHBOR_ALLGATHERV_INTER_ALGORITHM) {
138             case MPIR_CVAR_NEIGHBOR_ALLGATHERV_INTER_ALGORITHM_nb:
139                 mpi_errno = MPIR_Neighbor_allgatherv_allcomm_nb(sendbuf, sendcount, sendtype,
140                                                                 recvbuf, recvcounts, displs,
141                                                                 recvtype, comm_ptr);
142                 break;
143             case MPIR_CVAR_NEIGHBOR_ALLGATHERV_INTER_ALGORITHM_auto:
144                 mpi_errno = MPIR_Neighbor_allgatherv_allcomm_auto(sendbuf, sendcount, sendtype,
145                                                                   recvbuf, recvcounts, displs,
146                                                                   recvtype, comm_ptr);
147                 break;
148             default:
149                 MPIR_Assert(0);
150         }
151     }
152     MPIR_ERR_CHECK(mpi_errno);
153 
154   fn_exit:
155     return mpi_errno;
156   fn_fail:
157     goto fn_exit;
158 }
159 
MPIR_Neighbor_allgatherv(const void * sendbuf,int sendcount,MPI_Datatype sendtype,void * recvbuf,const int recvcounts[],const int displs[],MPI_Datatype recvtype,MPIR_Comm * comm_ptr)160 int MPIR_Neighbor_allgatherv(const void *sendbuf, int sendcount,
161                              MPI_Datatype sendtype, void *recvbuf,
162                              const int recvcounts[], const int displs[],
163                              MPI_Datatype recvtype, MPIR_Comm * comm_ptr)
164 {
165     int mpi_errno = MPI_SUCCESS;
166 
167     if ((MPIR_CVAR_DEVICE_COLLECTIVES == MPIR_CVAR_DEVICE_COLLECTIVES_all) ||
168         ((MPIR_CVAR_DEVICE_COLLECTIVES == MPIR_CVAR_DEVICE_COLLECTIVES_percoll) &&
169          MPIR_CVAR_BARRIER_DEVICE_COLLECTIVE)) {
170         mpi_errno =
171             MPID_Neighbor_allgatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs,
172                                      recvtype, comm_ptr);
173     } else {
174         mpi_errno = MPIR_Neighbor_allgatherv_impl(sendbuf, sendcount, sendtype,
175                                                   recvbuf, recvcounts, displs, recvtype, comm_ptr);
176     }
177 
178     return mpi_errno;
179 }
180 
181 #endif /* MPICH_MPI_FROM_PMPI */
182 
183 /*@
184 MPI_Neighbor_allgatherv - The vector variant of MPI_Neighbor_allgather.
185 
186 Input Parameters:
187 + sendbuf - starting address of the send buffer (choice)
188 . sendcount - number of elements sent to each neighbor (non-negative integer)
189 . sendtype - data type of send buffer elements (handle)
190 . recvcounts - non-negative integer array (of length indegree) containing the number of elements that are received from each neighbor
191 . displs - integer array (of length indegree). Entry i specifies the displacement (relative to recvbuf) at which to place the incoming data from neighbor i.
192 . recvtype - data type of receive buffer elements (handle)
193 - comm - communicator (handle)
194 
195 Output Parameters:
196 . recvbuf - starting address of the receive buffer (choice)
197 
198 .N ThreadSafe
199 
200 .N Fortran
201 
202 .N Errors
203 @*/
MPI_Neighbor_allgatherv(const void * sendbuf,int sendcount,MPI_Datatype sendtype,void * recvbuf,const int recvcounts[],const int displs[],MPI_Datatype recvtype,MPI_Comm comm)204 int MPI_Neighbor_allgatherv(const void *sendbuf, int sendcount, MPI_Datatype sendtype,
205                             void *recvbuf, const int recvcounts[], const int displs[],
206                             MPI_Datatype recvtype, MPI_Comm comm)
207 {
208     int mpi_errno = MPI_SUCCESS;
209     MPIR_Comm *comm_ptr = NULL;
210     MPIR_FUNC_TERSE_STATE_DECL(MPID_STATE_MPI_NEIGHBOR_ALLGATHERV);
211 
212     MPID_THREAD_CS_ENTER(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
213     MPIR_FUNC_TERSE_ENTER(MPID_STATE_MPI_NEIGHBOR_ALLGATHERV);
214 
215     /* Validate parameters, especially handles needing to be converted */
216 #ifdef HAVE_ERROR_CHECKING
217     {
218         MPID_BEGIN_ERROR_CHECKS;
219         {
220             MPIR_ERRTEST_DATATYPE(sendtype, "sendtype", mpi_errno);
221             MPIR_ERRTEST_DATATYPE(recvtype, "recvtype", mpi_errno);
222             MPIR_ERRTEST_COMM(comm, mpi_errno);
223 
224             /* TODO more checks may be appropriate */
225         }
226         MPID_END_ERROR_CHECKS;
227     }
228 #endif /* HAVE_ERROR_CHECKING */
229 
230     /* Convert MPI object handles to object pointers */
231     MPIR_Comm_get_ptr(comm, comm_ptr);
232 
233     /* Validate parameters and objects (post conversion) */
234 #ifdef HAVE_ERROR_CHECKING
235     {
236         MPID_BEGIN_ERROR_CHECKS;
237         {
238             if (!HANDLE_IS_BUILTIN(sendtype)) {
239                 MPIR_Datatype *sendtype_ptr = NULL;
240                 MPIR_Datatype_get_ptr(sendtype, sendtype_ptr);
241                 MPIR_Datatype_valid_ptr(sendtype_ptr, mpi_errno);
242                 MPIR_Datatype_committed_ptr(sendtype_ptr, mpi_errno);
243             }
244 
245             if (!HANDLE_IS_BUILTIN(recvtype)) {
246                 MPIR_Datatype *recvtype_ptr = NULL;
247                 MPIR_Datatype_get_ptr(recvtype, recvtype_ptr);
248                 MPIR_Datatype_valid_ptr(recvtype_ptr, mpi_errno);
249                 MPIR_Datatype_committed_ptr(recvtype_ptr, mpi_errno);
250             }
251 
252             MPIR_Comm_valid_ptr(comm_ptr, mpi_errno, FALSE);
253             /* TODO more checks may be appropriate (counts, in_place, buffer aliasing, etc) */
254             if (mpi_errno != MPI_SUCCESS)
255                 goto fn_fail;
256         }
257         MPID_END_ERROR_CHECKS;
258     }
259 #endif /* HAVE_ERROR_CHECKING */
260 
261     /* ... body of routine ...  */
262 
263     mpi_errno = MPIR_Neighbor_allgatherv(sendbuf, sendcount, sendtype, recvbuf,
264                                          recvcounts, displs, recvtype, comm_ptr);
265     MPIR_ERR_CHECK(mpi_errno);
266 
267     /* ... end of body of routine ... */
268 
269   fn_exit:
270     MPIR_FUNC_TERSE_EXIT(MPID_STATE_MPI_NEIGHBOR_ALLGATHERV);
271     MPID_THREAD_CS_EXIT(GLOBAL, MPIR_THREAD_GLOBAL_ALLFUNC_MUTEX);
272     return mpi_errno;
273 
274   fn_fail:
275     /* --BEGIN ERROR HANDLING-- */
276 #ifdef HAVE_ERROR_CHECKING
277     {
278         mpi_errno =
279             MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE, __func__, __LINE__, MPI_ERR_OTHER,
280                                  "**mpi_neighbor_allgatherv",
281                                  "**mpi_neighbor_allgatherv %p %d %D %p %p %p %D %C", sendbuf,
282                                  sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm);
283     }
284 #endif
285     mpi_errno = MPIR_Err_return_comm(NULL, __func__, mpi_errno);
286     goto fn_exit;
287     /* --END ERROR HANDLING-- */
288 }
289