1 /* -*- Mode: C; c-basic-offset:4 ; -*- */
2 /*
3 *
4 * (C) 2001 by Argonne National Laboratory.
5 * See COPYRIGHT in top-level directory.
6 */
7
8 #include "mpiimpl.h"
9
10 #if !defined(MPID_REQUEST_PTR_ARRAY_SIZE)
11 /* use a larger default size of 64 in order to enhance SQMR performance */
12 #define MPID_REQUEST_PTR_ARRAY_SIZE 64
13 #endif
14
15 /* -- Begin Profiling Symbol Block for routine MPI_Waitall */
16 #if defined(HAVE_PRAGMA_WEAK)
17 #pragma weak MPI_Waitall = PMPI_Waitall
18 #elif defined(HAVE_PRAGMA_HP_SEC_DEF)
19 #pragma _HP_SECONDARY_DEF PMPI_Waitall MPI_Waitall
20 #elif defined(HAVE_PRAGMA_CRI_DUP)
21 #pragma _CRI duplicate MPI_Waitall as PMPI_Waitall
22 #endif
23 /* -- End Profiling Symbol Block */
24
25 /* Define MPICH_MPI_FROM_PMPI if weak symbols are not supported to build
26 the MPI routines */
27 #ifndef MPICH_MPI_FROM_PMPI
28 #undef MPI_Waitall
29 #define MPI_Waitall PMPI_Waitall
30
31 #undef FUNCNAME
32 #define FUNCNAME MPIR_Waitall_impl
33 #undef FCNAME
34 #define FCNAME MPIU_QUOTE(FUNCNAME)
/* MPIR_Waitall_impl - implementation core of MPI_Waitall.
 *
 * Blocks until all 'count' requests in array_of_requests have completed.
 * Unless array_of_statuses is MPI_STATUSES_IGNORE, each completed
 * request's status is stored in the corresponding array element.
 *
 * Returns MPI_SUCCESS on success; MPI_ERR_IN_STATUS if one of the
 * requests completed with an error (per-request codes are then placed in
 * the status array: the failing code for the failed request,
 * MPI_ERR_PENDING for requests not yet completed); or an error code from
 * the progress engine.  Error macros (MPIU_ERR_POP et al.) jump to
 * fn_fail. */
int MPIR_Waitall_impl(int count, MPI_Request array_of_requests[],
                      MPI_Status array_of_statuses[])
{
    int mpi_errno = MPI_SUCCESS;
    MPID_Request * request_ptr_array[MPID_REQUEST_PTR_ARRAY_SIZE];
    MPID_Request ** request_ptrs = request_ptr_array;
    MPI_Status * status_ptr;
    MPID_Progress_state progress_state;
    int i, j;
    int n_completed;
    int active_flag;
    int rc;
    int n_greqs;
    const int ignoring_statuses = (array_of_statuses == MPI_STATUSES_IGNORE);
    int optimize = ignoring_statuses; /* see NOTE-O1 */
    MPIU_CHKLMEM_DECL(1);

    /* Convert MPI request handles to request object pointers; fall back to
       a heap allocation when count exceeds the stack array.  On allocation
       failure the macro jumps to fn_fail. */
    if (count > MPID_REQUEST_PTR_ARRAY_SIZE)
    {
        MPIU_CHKLMEM_MALLOC(request_ptrs, MPID_Request **, count * sizeof(MPID_Request *), mpi_errno, "request pointers");
    }

    n_greqs = 0;
    n_completed = 0;
    for (i = 0; i < count; i++)
    {
        if (array_of_requests[i] != MPI_REQUEST_NULL)
        {
            MPID_Request_get_ptr(array_of_requests[i], request_ptrs[i]);
            /* Validate object pointers if error checking is enabled */
#ifdef HAVE_ERROR_CHECKING
            {
                MPID_BEGIN_ERROR_CHECKS;
                {
                    MPID_Request_valid_ptr( request_ptrs[i], mpi_errno );
                    if (mpi_errno) MPIU_ERR_POP(mpi_errno);
                    /* message-probe (MPI_Mprobe) requests must not be
                       completed through the wait/test routines */
                    MPIU_ERR_CHKANDJUMP1((request_ptrs[i]->kind == MPID_REQUEST_MPROBE),
                                         mpi_errno, MPI_ERR_ARG, "**msgnotreq", "**msgnotreq %d", i);
                }
                MPID_END_ERROR_CHECKS;
            }
#endif
            /* the fastpath below handles only plain send/recv requests */
            if (request_ptrs[i]->kind != MPID_REQUEST_RECV &&
                request_ptrs[i]->kind != MPID_REQUEST_SEND)
            {
                optimize = FALSE;
            }

            if (request_ptrs[i]->kind == MPID_UREQUEST)
                ++n_greqs;
        }
        else
        {
            /* a NULL handle counts as already complete with an empty status */
            status_ptr = (array_of_statuses != MPI_STATUSES_IGNORE) ? &array_of_statuses[i] : MPI_STATUS_IGNORE;
            MPIR_Status_set_empty(status_ptr);
            request_ptrs[i] = NULL;
            n_completed += 1;
            optimize = FALSE;
        }
    }

    /* nothing left to wait on */
    if (n_completed == count)
    {
        goto fn_exit;
    }

    /* NOTE-O1: high-message-rate optimization.  For simple send and recv
     * operations and MPI_STATUSES_IGNORE we use a fastpath approach that strips
     * out as many unnecessary jumps and error handling as possible.
     *
     * Possible variation: permit request_ptrs[i]==NULL at the cost of an
     * additional branch inside the for-loop below. */
    if (optimize) {
        MPID_Progress_start(&progress_state);
        for (i = 0; i < count; ++i) {
            while (!MPID_Request_is_complete(request_ptrs[i])) {
                mpi_errno = MPID_Progress_wait(&progress_state);
                /* must check and handle the error, can't guard with HAVE_ERROR_CHECKING, but it's
                 * OK for the error case to be slower */
                if (unlikely(mpi_errno)) {
                    /* --BEGIN ERROR HANDLING-- */
                    MPID_Progress_end(&progress_state);
                    MPIU_ERR_POP(mpi_errno);
                    /* --END ERROR HANDLING-- */
                }
            }
            mpi_errno = MPIR_Request_complete_fastpath(&array_of_requests[i], request_ptrs[i]);
            if (mpi_errno) MPIU_ERR_POP(mpi_errno);
        }

        MPID_Progress_end(&progress_state);

        goto fn_exit;
    }

    /* ------ "slow" code path below ------ */

    /* Grequest_waitall may run the progress engine - thus, we don't
       invoke progress_start until after running Grequest_waitall */
    /* first, complete any generalized requests */
    if (n_greqs)
    {
        mpi_errno = MPIR_Grequest_waitall(count, request_ptrs);
        if (mpi_errno) MPIU_ERR_POP(mpi_errno);
    }

    MPID_Progress_start(&progress_state);

    for (i = 0; i < count; i++)
    {
        if (request_ptrs[i] == NULL)
        {
            /* already complete: MPI_REQUEST_NULL or a finished greq */
            if (!ignoring_statuses)
                array_of_statuses[i].MPI_ERROR = MPI_SUCCESS;
            continue;
        }

        /* wait for ith request to complete */
        while (!MPID_Request_is_complete(request_ptrs[i]))
        {
            /* generalized requests should already be finished */
            MPIU_Assert(request_ptrs[i]->kind != MPID_UREQUEST);

            mpi_errno = MPID_Progress_wait(&progress_state);
            if (mpi_errno != MPI_SUCCESS) {
                /* --BEGIN ERROR HANDLING-- */
                MPID_Progress_end(&progress_state);
                MPIU_ERR_POP(mpi_errno);
                /* --END ERROR HANDLING-- */
            }
        }

        /* complete the request and check the status */
        status_ptr = (ignoring_statuses) ? MPI_STATUS_IGNORE : &array_of_statuses[i];
        rc = MPIR_Request_complete(&array_of_requests[i], request_ptrs[i], status_ptr, &active_flag);
        if (rc == MPI_SUCCESS)
        {
            request_ptrs[i] = NULL;
            if (!ignoring_statuses)
                status_ptr->MPI_ERROR = MPI_SUCCESS;
        }
        else
        {
            /* req completed with an error */
            mpi_errno = MPI_ERR_IN_STATUS;
            if (!ignoring_statuses)
            {
                /* set the error code for this request */
                status_ptr->MPI_ERROR = rc;

                /* set the error codes for the rest of the uncompleted
                   requests to PENDING.  (The enclosing !ignoring_statuses
                   test already guarantees the status array is valid; the
                   old per-iteration re-check was redundant.) */
                for (j = i+1; j < count; ++j)
                {
                    if (request_ptrs[j] == NULL)
                    {
                        /* either the user specified MPI_REQUEST_NULL, or this is a completed greq */
                        array_of_statuses[j].MPI_ERROR = MPI_SUCCESS;
                    }
                    else
                    {
                        array_of_statuses[j].MPI_ERROR = MPI_ERR_PENDING;
                    }
                }
            }
            break;
        }
    }
    MPID_Progress_end(&progress_state);

 fn_exit:
    if (count > MPID_REQUEST_PTR_ARRAY_SIZE)
    {
        MPIU_CHKLMEM_FREEALL();
    }

    return mpi_errno;
 fn_fail:
    goto fn_exit;
}
218
219 #endif
220
221 #undef FUNCNAME
222 #define FUNCNAME MPI_Waitall
223 #undef FCNAME
224 #define FCNAME MPIU_QUOTE(FUNCNAME)
225 /*@
226 MPI_Waitall - Waits for all given MPI Requests to complete
227
228 Input Parameters:
229 + count - list length (integer)
230 - array_of_requests - array of request handles (array of handles)
231
232 Output Parameter:
233 . array_of_statuses - array of status objects (array of Statuses). May be
234 'MPI_STATUSES_IGNORE'.
235
236 Notes:
237
238 If one or more of the requests completes with an error, 'MPI_ERR_IN_STATUS' is
returned. An error value will be present in elements of 'array_of_statuses'
240 associated with the requests. Likewise, the 'MPI_ERROR' field in the status
241 elements associated with requests that have successfully completed will be
242 'MPI_SUCCESS'. Finally, those requests that have not completed will have a
243 value of 'MPI_ERR_PENDING'.
244
245 While it is possible to list a request handle more than once in the
246 array_of_requests, such an action is considered erroneous and may cause the
program to unexpectedly terminate or produce incorrect results.
248
249 .N waitstatus
250
251 .N ThreadSafe
252
253 .N Fortran
254
255 .N Errors
256 .N MPI_SUCCESS
257 .N MPI_ERR_REQUEST
258 .N MPI_ERR_ARG
259 .N MPI_ERR_IN_STATUS
260 @*/
int MPI_Waitall(int count, MPI_Request array_of_requests[],
                MPI_Status array_of_statuses[])
{
    int mpi_errno = MPI_SUCCESS;
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_WAITALL);

    /* MPI must be initialized before this call (the _ORDIE variant does
       not return an error to the caller) */
    MPIR_ERRTEST_INITIALIZED_ORDIE();

    /* enter the global (ALLFUNC) critical section; must be matched by the
       CS_EXIT in fn_exit below on every path */
    MPIU_THREAD_CS_ENTER(ALLFUNC,);
    MPID_MPI_PT2PT_FUNC_ENTER(MPID_STATE_MPI_WAITALL);

    /* Check the arguments */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            int i;
            MPIR_ERRTEST_COUNT(count, mpi_errno);

            if (count != 0) {
                MPIR_ERRTEST_ARGNULL(array_of_requests, "array_of_requests", mpi_errno);
                /* NOTE: MPI_STATUSES_IGNORE != NULL */

                MPIR_ERRTEST_ARGNULL(array_of_statuses, "array_of_statuses", mpi_errno);
            }

            /* each handle must be a valid request handle or MPI_REQUEST_NULL */
            for (i = 0; i < count; i++) {
                MPIR_ERRTEST_ARRAYREQUEST_OR_NULL(array_of_requests[i], i, mpi_errno);
            }
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* ... body of routine ... */

    /* all real work happens in the impl routine */
    mpi_errno = MPIR_Waitall_impl(count, array_of_requests, array_of_statuses);
    if (mpi_errno) goto fn_fail;

    /* ... end of body of routine ... */

  fn_exit:
    MPID_MPI_PT2PT_FUNC_EXIT(MPID_STATE_MPI_WAITALL);
    MPIU_THREAD_CS_EXIT(ALLFUNC,);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#ifdef HAVE_ERROR_CHECKING
    /* wrap the error with this routine's name and arguments for reporting */
    mpi_errno = MPIR_Err_create_code(mpi_errno, MPIR_ERR_RECOVERABLE,
                                     FCNAME, __LINE__, MPI_ERR_OTHER,
                                     "**mpi_waitall",
                                     "**mpi_waitall %d %p %p",
                                     count, array_of_requests,
                                     array_of_statuses);
#endif
    mpi_errno = MPIR_Err_return_comm(NULL, FCNAME, mpi_errno);
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
321