1 /* -*- Mode: C; c-basic-offset:4 ; -*- */
2 /*
3 *
4 * (C) 2001 by Argonne National Laboratory.
5 * See COPYRIGHT in top-level directory.
6 */
7
8 #include "mpiimpl.h"
9 #include "mpicomm.h"
10
11 #define MPIR_INTERCOMM_CREATE_TAG 0
12
13 /* -- Begin Profiling Symbol Block for routine MPI_Intercomm_create */
14 #if defined(HAVE_PRAGMA_WEAK)
15 #pragma weak MPI_Intercomm_create = PMPI_Intercomm_create
16 #elif defined(HAVE_PRAGMA_HP_SEC_DEF)
17 #pragma _HP_SECONDARY_DEF PMPI_Intercomm_create MPI_Intercomm_create
18 #elif defined(HAVE_PRAGMA_CRI_DUP)
19 #pragma _CRI duplicate MPI_Intercomm_create as PMPI_Intercomm_create
20 #endif
21 /* -- End Profiling Symbol Block */
22
23 /* Define MPICH_MPI_FROM_PMPI if weak symbols are not supported to build
24 the MPI routines */
25 #ifdef HAVE_ERROR_CHECKING
26 PMPI_LOCAL int MPIR_CheckDisjointLpids( int [], int, int [], int );
27 #endif /* HAVE_ERROR_CHECKING */
28 PMPI_LOCAL int MPID_LPID_GetAllInComm( MPID_Comm *comm_ptr, int local_size,
29 int local_lpids[] );
30
31 #ifndef MPICH_MPI_FROM_PMPI
32 #undef MPI_Intercomm_create
33 #define MPI_Intercomm_create PMPI_Intercomm_create
34
35 #ifdef HAVE_ERROR_CHECKING
36 /* 128 allows us to handle up to 4k processes */
37 #define N_STATIC_LPID32 128
38 #undef FUNCNAME
39 #define FUNCNAME MPIR_CheckDisjointLpids
40 #undef FCNAME
41 #define FCNAME MPIU_QUOTE(FUNCNAME)
MPIR_CheckDisjointLpids(int lpids1[],int n1,int lpids2[],int n2)42 PMPI_LOCAL int MPIR_CheckDisjointLpids( int lpids1[], int n1,
43 int lpids2[], int n2 )
44 {
45 int i, mask_size, idx, bit, maxlpid = -1;
46 int mpi_errno = MPI_SUCCESS;
47 uint32_t lpidmaskPrealloc[N_STATIC_LPID32];
48 uint32_t *lpidmask;
49 MPIU_CHKLMEM_DECL(1);
50
51 /* Find the max lpid */
52 for (i=0; i<n1; i++) {
53 if (lpids1[i] > maxlpid) maxlpid = lpids1[i];
54 }
55 for (i=0; i<n2; i++) {
56 if (lpids2[i] > maxlpid) maxlpid = lpids2[i];
57 }
58
59 mask_size = (maxlpid / 32) + 1;
60
61 if (mask_size > N_STATIC_LPID32) {
62 MPIU_CHKLMEM_MALLOC(lpidmask,uint32_t*,mask_size*sizeof(uint32_t),
63 mpi_errno,"lpidmask");
64 }
65 else {
66 lpidmask = lpidmaskPrealloc;
67 }
68
69 /* zero the bitvector array */
70 memset(lpidmask, 0x00, mask_size*sizeof(*lpidmask));
71
72 /* Set the bits for the first array */
73 for (i=0; i<n1; i++) {
74 idx = lpids1[i] / 32;
75 bit = lpids1[i] % 32;
76 lpidmask[idx] = lpidmask[idx] | (1 << bit);
77 MPIU_Assert(idx < mask_size);
78 }
79
80 /* Look for any duplicates in the second array */
81 for (i=0; i<n2; i++) {
82 idx = lpids2[i] / 32;
83 bit = lpids2[i] % 32;
84 if (lpidmask[idx] & (1 << bit)) {
85 MPIU_ERR_SET1(mpi_errno,MPI_ERR_COMM,
86 "**dupprocesses", "**dupprocesses %d", lpids2[i] );
87 goto fn_fail;
88 }
89 /* Add a check on duplicates *within* group 2 */
90 lpidmask[idx] = lpidmask[idx] | (1 << bit);
91 MPIU_Assert(idx < mask_size);
92 }
93
94 /* Also fall through for normal return */
95 fn_fail:
96 MPIU_CHKLMEM_FREEALL();
97 return mpi_errno;
98
99 }
100 #endif /* HAVE_ERROR_CHECKING */
101
102 #ifndef HAVE_GPID_ROUTINES
103 /* FIXME: A temporary version for lpids within my comm world */
MPID_GPID_GetAllInComm(MPID_Comm * comm_ptr,int local_size,int local_gpids[],int * singlePG)104 PMPI_LOCAL int MPID_GPID_GetAllInComm( MPID_Comm *comm_ptr, int local_size,
105 int local_gpids[], int *singlePG )
106 {
107 int i;
108 int *gpid = local_gpids;
109
110 for (i=0; i<comm_ptr->local_size; i++) {
111 *gpid++ = 0;
112 (void)MPID_VCR_Get_lpid( comm_ptr->vcr[i], gpid );
113 gpid++;
114 }
115 *singlePG = 1;
116 return 0;
117 }
118
119 /* FIXME: A temp for lpids within my comm world */
MPID_GPID_ToLpidArray(int size,int gpid[],int lpid[])120 PMPI_LOCAL int MPID_GPID_ToLpidArray( int size, int gpid[], int lpid[] )
121 {
122 int i;
123
124 for (i=0; i<size; i++) {
125 lpid[i] = *++gpid; gpid++;
126 }
127 return 0;
128 }
129 /* FIXME: for MPI1, all process ids are relative to MPI_COMM_WORLD.
130 For MPI2, we'll need to do something more complex */
MPID_VCR_CommFromLpids(MPID_Comm * newcomm_ptr,int size,const int lpids[])131 PMPI_LOCAL int MPID_VCR_CommFromLpids( MPID_Comm *newcomm_ptr,
132 int size, const int lpids[] )
133 {
134 MPID_Comm *commworld_ptr;
135 int i;
136
137 commworld_ptr = MPIR_Process.comm_world;
138 /* Setup the communicator's vc table: remote group */
139 MPID_VCRT_Create( size, &newcomm_ptr->vcrt );
140 MPID_VCRT_Get_ptr( newcomm_ptr->vcrt, &newcomm_ptr->vcr );
141 for (i=0; i<size; i++) {
142 /* For rank i in the new communicator, find the corresponding
143 rank in the comm world (FIXME FOR MPI2) */
144 /* printf( "[%d] Remote rank %d has lpid %d\n",
145 MPIR_Process.comm_world->rank, i, lpids[i] ); */
146 if (lpids[i] < commworld_ptr->remote_size) {
147 MPID_VCR_Dup( commworld_ptr->vcr[lpids[i]],
148 &newcomm_ptr->vcr[i] );
149 }
150 else {
151 /* We must find the corresponding vcr for a given lpid */
152 /* FIXME: Error */
153 return 1;
154 /* MPID_VCR_Dup( ???, &newcomm_ptr->vcr[i] ); */
155 }
156 }
157 return 0;
158 }
159
160 #endif /* HAVE_GPID_ROUTINES */
161
MPID_LPID_GetAllInComm(MPID_Comm * comm_ptr,int local_size,int local_lpids[])162 PMPI_LOCAL int MPID_LPID_GetAllInComm( MPID_Comm *comm_ptr, int local_size,
163 int local_lpids[] )
164 {
165 int i;
166
167 /* FIXME: Should be using the local_size argument */
168 MPIU_Assert( comm_ptr->local_size == local_size );
169 for (i=0; i<comm_ptr->local_size; i++) {
170 (void)MPID_VCR_Get_lpid( comm_ptr->vcr[i], &local_lpids[i] );
171 }
172 return 0;
173 }
174
175 #undef FUNCNAME
176 #define FUNCNAME MPIR_Intercomm_create_impl
177 #undef FCNAME
178 #define FCNAME MPIU_QUOTE(FUNCNAME)
/* Build an intercommunicator from two disjoint intracommunicators.
   Collective over all of local_comm_ptr; additionally the two leader
   processes communicate directly over peer_comm_ptr.  Protocol:
     1. leaders exchange group sizes and gpid arrays, and (with error
        checking enabled) verify the groups are disjoint;
     2. all processes obtain a recv context id collectively;
     3. leaders swap context ids and broadcast size/context/low-group
        flag plus the remote gpids to their local group;
     4. every process converts gpids to lpids and builds the new
        communicator's vc tables.
   NOTE: the user-supplied tag is accepted but ignored (MPI 3.0). */
int MPIR_Intercomm_create_impl(MPID_Comm *local_comm_ptr, int local_leader,
                               MPID_Comm *peer_comm_ptr, int remote_leader, int tag,
                               MPID_Comm **new_intercomm_ptr)
{
    int mpi_errno = MPI_SUCCESS;
    MPIR_Context_id_t final_context_id, recvcontext_id;
    int remote_size, *remote_lpids=0, *remote_gpids=0, singlePG;
    int local_size, *local_gpids=0, *local_lpids=0;
    int comm_info[3];           /* {remote_size, final_context_id, is_low_group} */
    int is_low_group = 0;
    int cts_tag;
    int i;
    int errflag = FALSE;
    MPIU_CHKLMEM_DECL(4);
    MPID_MPI_STATE_DECL(MPID_STATE_MPIR_INTERCOMM_CREATE_IMPL);

    MPID_MPI_FUNC_ENTER(MPID_STATE_MPIR_INTERCOMM_CREATE_IMPL);

    /* Shift tag into the tagged coll space (tag provided by the user
       is ignored as of MPI 3.0) */
    cts_tag = MPIR_INTERCOMM_CREATE_TAG | MPIR_Process.tagged_coll_mask;

    /*
     * Error checking for this routine requires care.  Because this
     * routine is collective over two different sets of processes,
     * it is relatively easy for the user to try to create an
     * intercommunicator from two overlapping groups of processes.
     * This is made more likely by inconsistencies in the MPI-1
     * specification (clarified in MPI-2) that seemed to allow
     * the groups to overlap.  Because of that, we first check that the
     * groups are in fact disjoint before performing any collective
     * operations.
     */

    if (local_comm_ptr->rank == local_leader) {

	/* First, exchange the group information.  If we were certain
	   that the groups were disjoint, we could exchange possible
	   context ids at the same time, saving one communication.
	   But experience has shown that that is a risky assumption.
	*/
	/* Exchange information with my peer.  Use sendrecv */
	local_size = local_comm_ptr->local_size;

	/* printf( "About to sendrecv in intercomm_create\n" );fflush(stdout);*/
	MPIU_DBG_MSG_FMT(COMM,VERBOSE,(MPIU_DBG_FDEST,"rank %d sendrecv to rank %d", peer_comm_ptr->rank,
                                       remote_leader));
	mpi_errno = MPIC_Sendrecv( &local_size,  1, MPI_INT,
				   remote_leader, cts_tag,
				   &remote_size, 1, MPI_INT,
				   remote_leader, cts_tag,
				   peer_comm_ptr->handle, MPI_STATUS_IGNORE );
	if (mpi_errno) MPIU_ERR_POP(mpi_errno);

	MPIU_DBG_MSG_FMT(COMM,VERBOSE,(MPIU_DBG_FDEST, "local size = %d, remote size = %d", local_size,
                                       remote_size ));
	/* With this information, we can now send and receive the
	   global process ids from the peer.  Gpids are pairs of ints,
	   hence the 2* sizing of the gpid arrays. */
	MPIU_CHKLMEM_MALLOC(remote_gpids,int*,2*remote_size*sizeof(int), mpi_errno,"remote_gpids");
	MPIU_CHKLMEM_MALLOC(remote_lpids,int*,remote_size*sizeof(int), mpi_errno,"remote_lpids");
	MPIU_CHKLMEM_MALLOC(local_gpids,int*,2*local_size*sizeof(int), mpi_errno,"local_gpids");
	MPIU_CHKLMEM_MALLOC(local_lpids,int*,local_size*sizeof(int), mpi_errno,"local_lpids");

	mpi_errno = MPID_GPID_GetAllInComm( local_comm_ptr, local_size, local_gpids, &singlePG );
	if (mpi_errno) MPIU_ERR_POP(mpi_errno);

	/* Exchange the lpid arrays */
	mpi_errno = MPIC_Sendrecv( local_gpids, 2*local_size, MPI_INT,
				   remote_leader, cts_tag,
				   remote_gpids, 2*remote_size, MPI_INT,
				   remote_leader, cts_tag, peer_comm_ptr->handle, MPI_STATUS_IGNORE );
	if (mpi_errno) MPIU_ERR_POP(mpi_errno);

	/* Convert the remote gpids to the lpids */
	mpi_errno = MPID_GPID_ToLpidArray( remote_size, remote_gpids, remote_lpids );
	if (mpi_errno) MPIU_ERR_POP(mpi_errno);

	/* Get our own lpids */
	mpi_errno = MPID_LPID_GetAllInComm( local_comm_ptr, local_size, local_lpids );
	if (mpi_errno) MPIU_ERR_POP(mpi_errno);

#       ifdef HAVE_ERROR_CHECKING
	{
	    MPID_BEGIN_ERROR_CHECKS;
	    {
		/* Now that we have both the local and remote processes,
		   check for any overlap */
		mpi_errno = MPIR_CheckDisjointLpids( local_lpids, local_size, remote_lpids, remote_size );
		if (mpi_errno) MPIU_ERR_POP(mpi_errno);
	    }
	    MPID_END_ERROR_CHECKS;
	}
#       endif /* HAVE_ERROR_CHECKING */

	/* Make an arbitrary decision about which group of processes is
	   the low group.  The LEADERS do this by comparing the
	   local process ids of the 0th member of the two groups.
	   The two leaders necessarily compute opposite values, which
	   gives each side of the intercomm a consistent ordering. */
	is_low_group = local_lpids[0] < remote_lpids[0];

	/* At this point, we're done with the local lpids; they'll
	   be freed with the other local memory on exit */

    } /* End of the first phase of the leader communication */

    /*
     * Create the contexts.  Each group will have a context for sending
     * to the other group. All processes must be involved.  Because
     * we know that the local and remote groups are disjoint, this
     * step will complete
     */
    MPIU_DBG_MSG_FMT(COMM,VERBOSE, (MPIU_DBG_FDEST,"About to get contextid (local_size=%d) on rank %d",
				    local_comm_ptr->local_size, local_comm_ptr->rank ));
    /* In the multi-threaded case, MPIR_Get_contextid assumes that the
       calling routine already holds the single critical section */
    /* TODO: Make sure this is tag-safe */
    mpi_errno = MPIR_Get_contextid( local_comm_ptr, &recvcontext_id );
    if (mpi_errno) MPIU_ERR_POP(mpi_errno);
    MPIU_Assert(recvcontext_id != 0);
    MPIU_DBG_MSG_FMT(COMM,VERBOSE, (MPIU_DBG_FDEST,"Got contextid=%d", recvcontext_id));

    /* Leaders can now swap context ids and then broadcast the value
       to the local group of processes */
    if (local_comm_ptr->rank == local_leader) {
	MPIR_Context_id_t remote_context_id;

	mpi_errno = MPIC_Sendrecv( &recvcontext_id, 1, MPIR_CONTEXT_ID_T_DATATYPE, remote_leader, cts_tag,
				   &remote_context_id, 1, MPIR_CONTEXT_ID_T_DATATYPE, remote_leader, cts_tag,
				   peer_comm_ptr->handle, MPI_STATUS_IGNORE );
	if (mpi_errno) MPIU_ERR_POP(mpi_errno);

	/* We send using the remote group's context id */
	final_context_id = remote_context_id;

	/* Now, send all of our local processes the remote_lpids,
	   along with the final context id */
	comm_info[0] = remote_size;
	comm_info[1] = final_context_id;
	comm_info[2] = is_low_group;
	MPIU_DBG_MSG(COMM,VERBOSE,"About to bcast on local_comm");
	mpi_errno = MPIR_Bcast_impl( comm_info, 3, MPI_INT, local_leader, local_comm_ptr, &errflag );
	if (mpi_errno) MPIU_ERR_POP(mpi_errno);
	MPIU_ERR_CHKANDJUMP(errflag, mpi_errno, MPI_ERR_OTHER, "**coll_fail");
	mpi_errno = MPIR_Bcast_impl( remote_gpids, 2*remote_size, MPI_INT, local_leader,
				     local_comm_ptr, &errflag );
	if (mpi_errno) MPIU_ERR_POP(mpi_errno);
	MPIU_ERR_CHKANDJUMP(errflag, mpi_errno, MPI_ERR_OTHER, "**coll_fail");
	MPIU_DBG_MSG_D(COMM,VERBOSE,"end of bcast on local_comm of size %d",
		       local_comm_ptr->local_size );
    }
    else
    {
	/* we're the other processes; receive the leader's broadcasts
	   in the SAME order the leader sent them */
	MPIU_DBG_MSG(COMM,VERBOSE,"About to receive bcast on local_comm");
	mpi_errno = MPIR_Bcast_impl( comm_info, 3, MPI_INT, local_leader, local_comm_ptr, &errflag );
	if (mpi_errno) MPIU_ERR_POP(mpi_errno);
	MPIU_ERR_CHKANDJUMP(errflag, mpi_errno, MPI_ERR_OTHER, "**coll_fail");
	remote_size = comm_info[0];
	MPIU_CHKLMEM_MALLOC(remote_gpids,int*,2*remote_size*sizeof(int), mpi_errno,"remote_gpids");
	MPIU_CHKLMEM_MALLOC(remote_lpids,int*,remote_size*sizeof(int), mpi_errno,"remote_lpids");
	mpi_errno = MPIR_Bcast_impl( remote_gpids, 2*remote_size, MPI_INT, local_leader,
                                     local_comm_ptr, &errflag );
	if (mpi_errno) MPIU_ERR_POP(mpi_errno);
	MPIU_ERR_CHKANDJUMP(errflag, mpi_errno, MPI_ERR_OTHER, "**coll_fail");

	/* Extract the context and group sign information */
	final_context_id = comm_info[1];
	is_low_group = comm_info[2];
    }

    /* Finish up by giving the device the opportunity to update
       any other information among these processes.  Note that the
       new intercomm has not been set up; in fact, we haven't yet
       attempted to set up the connection tables.

       In the case of the ch3 device, this calls MPID_PG_ForwardPGInfo
       to ensure that all processes have the information about all
       process groups.  This must be done before the call
       to MPID_GPID_ToLpidArray, as that call needs to know about
       all of the process groups.
    */
#ifdef MPID_ICCREATE_REMOTECOMM_HOOK
    MPID_ICCREATE_REMOTECOMM_HOOK( peer_comm_ptr, local_comm_ptr,
				   remote_size, remote_gpids, local_leader );

#endif

    /* Finally, if we are not the local leader, we need to
       convert the remote gpids to local pids.  This must be done
       after we allow the device to handle any steps that it needs to
       take to ensure that all processes contain the necessary process
       group information */
    if (local_comm_ptr->rank != local_leader) {
	mpi_errno = MPID_GPID_ToLpidArray( remote_size, remote_gpids, remote_lpids );
	if (mpi_errno) MPIU_ERR_POP(mpi_errno);
    }


    /* At last, we now have the information that we need to build the
       intercommunicator */

    /* All processes in the local_comm now build the communicator */

    mpi_errno = MPIR_Comm_create( new_intercomm_ptr );
    if (mpi_errno) goto fn_fail;

    (*new_intercomm_ptr)->context_id     = final_context_id;
    (*new_intercomm_ptr)->recvcontext_id = recvcontext_id;
    (*new_intercomm_ptr)->remote_size    = remote_size;
    (*new_intercomm_ptr)->local_size     = local_comm_ptr->local_size;
    (*new_intercomm_ptr)->rank           = local_comm_ptr->rank;
    (*new_intercomm_ptr)->comm_kind      = MPID_INTERCOMM;
    (*new_intercomm_ptr)->local_comm     = 0;
    (*new_intercomm_ptr)->is_low_group   = is_low_group;

    /* Build the vc table for the remote group from the remote lpids */
    mpi_errno = MPID_VCR_CommFromLpids( *new_intercomm_ptr, remote_size, remote_lpids );
    if (mpi_errno) goto fn_fail;

    /* Setup the communicator's vc table: local group.  This is
       just a duplicate of the local_comm's group */
    MPID_VCRT_Create( local_comm_ptr->local_size, &(*new_intercomm_ptr)->local_vcrt );
    MPID_VCRT_Get_ptr( (*new_intercomm_ptr)->local_vcrt, &(*new_intercomm_ptr)->local_vcr );
    for (i = 0; i < local_comm_ptr->local_size; i++) {
	MPID_VCR_Dup( local_comm_ptr->vcr[i], &(*new_intercomm_ptr)->local_vcr[i] );
    }

    /* Inherit the error handler (if any) */
    MPIU_THREAD_CS_ENTER(MPI_OBJ, local_comm_ptr);
    (*new_intercomm_ptr)->errhandler = local_comm_ptr->errhandler;
    if (local_comm_ptr->errhandler) {
	MPIR_Errhandler_add_ref( local_comm_ptr->errhandler );
    }
    MPIU_THREAD_CS_EXIT(MPI_OBJ, local_comm_ptr);

    mpi_errno = MPIR_Comm_commit(*new_intercomm_ptr);
    if (mpi_errno) MPIU_ERR_POP(mpi_errno);


 fn_exit:
    MPIU_CHKLMEM_FREEALL();
    MPID_MPI_FUNC_EXIT(MPID_STATE_MPIR_INTERCOMM_CREATE_IMPL);
    return mpi_errno;
 fn_fail:
    goto fn_exit;
}
422
423
424 #endif /* MPICH_MPI_FROM_PMPI */
425
426
427 #undef FUNCNAME
428 #define FUNCNAME MPI_Intercomm_create
429 #undef FCNAME
430 #define FCNAME MPIU_QUOTE(FUNCNAME)
431 /*@
432
MPI_Intercomm_create - Creates an intercommunicator from two intracommunicators
434
435 Input Parameters:
436 + local_comm - Local (intra)communicator
437 . local_leader - Rank in local_comm of leader (often 0)
438 . peer_comm - Communicator used to communicate between a
439 designated process in the other communicator.
440 Significant only at the process in 'local_comm' with
441 rank 'local_leader'.
442 . remote_leader - Rank in peer_comm of remote leader (often 0)
443 - tag - Message tag to use in constructing intercommunicator; if multiple
444 'MPI_Intercomm_creates' are being made, they should use different tags (more
445 precisely, ensure that the local and remote leaders are using different
446 tags for each 'MPI_intercomm_create').
447
448 Output Parameter:
449 . comm_out - Created intercommunicator
450
451 Notes:
452 'peer_comm' is significant only for the process designated the
453 'local_leader' in the 'local_comm'.
454
The MPI 1.1 Standard contains two mutually exclusive comments on the
input intercommunicators.  One says that their respective groups must be
disjoint; the other that the leaders can be the same process.  After
458 some discussion by the MPI Forum, it has been decided that the groups must
459 be disjoint. Note that the `reason` given for this in the standard is
460 `not` the reason for this choice; rather, the `other` operations on
461 intercommunicators (like 'MPI_Intercomm_merge') do not make sense if the
462 groups are not disjoint.
463
464 .N ThreadSafe
465
466 .N Fortran
467
468 .N Errors
469 .N MPI_SUCCESS
470 .N MPI_ERR_COMM
471 .N MPI_ERR_TAG
472 .N MPI_ERR_EXHAUSTED
473 .N MPI_ERR_RANK
474
475 .seealso: MPI_Intercomm_merge, MPI_Comm_free, MPI_Comm_remote_group,
476 MPI_Comm_remote_size
477
478 @*/
/* Public entry point: validate the arguments, convert the communicator
   handles to object pointers, and delegate the actual construction to
   MPIR_Intercomm_create_impl.  peer_comm is only examined on the
   local-leader process. */
int MPI_Intercomm_create(MPI_Comm local_comm, int local_leader,
			 MPI_Comm peer_comm, int remote_leader, int tag,
			 MPI_Comm *newintercomm)
{
    int mpi_errno = MPI_SUCCESS;
    MPID_Comm *local_comm_ptr = NULL;
    MPID_Comm *peer_comm_ptr = NULL;
    MPID_Comm *new_intercomm_ptr;
    MPID_MPI_STATE_DECL(MPID_STATE_MPI_INTERCOMM_CREATE);

    MPIR_ERRTEST_INITIALIZED_ORDIE();

    MPIU_THREAD_CS_ENTER(ALLFUNC,);
    MPID_MPI_FUNC_ENTER(MPID_STATE_MPI_INTERCOMM_CREATE);

    /* Validate parameters, especially handles needing to be converted */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
	    MPIR_ERRTEST_COMM_TAG(tag, mpi_errno);
	    MPIR_ERRTEST_COMM(local_comm, mpi_errno);
	}
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    /* Convert MPI object handles to object pointers */
    MPID_Comm_get_ptr( local_comm, local_comm_ptr );

    /* Validate parameters and objects (post conversion) */
#   ifdef HAVE_ERROR_CHECKING
    {
        MPID_BEGIN_ERROR_CHECKS;
        {
            /* Validate local_comm_ptr */
            MPID_Comm_valid_ptr( local_comm_ptr, mpi_errno );
	    if (local_comm_ptr) {
		/*  Only check if local_comm_ptr valid */
		MPIR_ERRTEST_COMM_INTRA(local_comm_ptr, mpi_errno );
		if ((local_leader < 0 ||
		     local_leader >= local_comm_ptr->local_size)) {
		    MPIU_ERR_SET2(mpi_errno,MPI_ERR_RANK,
				  "**ranklocal", "**ranklocal %d %d",
				  local_leader, local_comm_ptr->local_size - 1 );
		    /* If local_comm_ptr is not valid, it will be reset to null */
                    if (mpi_errno) goto fn_fail;
		}
		/* peer_comm is only significant on the local leader */
		if (local_comm_ptr->rank == local_leader) {
		    MPIR_ERRTEST_COMM(peer_comm, mpi_errno);
		}
	    }
        }
        MPID_END_ERROR_CHECKS;
    }
#   endif /* HAVE_ERROR_CHECKING */

    if (local_comm_ptr->rank == local_leader) {

	/* Convert and validate the peer communicator (leader only) */
	MPID_Comm_get_ptr( peer_comm, peer_comm_ptr );
#       ifdef HAVE_ERROR_CHECKING
	{
	    MPID_BEGIN_ERROR_CHECKS;
	    {
		MPID_Comm_valid_ptr( peer_comm_ptr, mpi_errno );
		/* Note: In MPI 1.0, peer_comm was restricted to
		   intracommunicators.  In 1.1, it may be any communicator */

		/* In checking the rank of the remote leader,
		   allow the peer_comm to be in intercommunicator
		   by checking against the remote size */
		if (!mpi_errno && peer_comm_ptr &&
		    (remote_leader < 0 ||
		     remote_leader >= peer_comm_ptr->remote_size)) {
		    MPIU_ERR_SET2(mpi_errno,MPI_ERR_RANK,
				  "**rankremote", "**rankremote %d %d",
				  remote_leader, peer_comm_ptr->remote_size - 1 );
		}
		/* Check that the local leader and the remote leader are
		   different processes.  This test requires looking at
		   the lpid for the two ranks in their respective
		   communicators.  However, an easy test is for
		   the same ranks in an intracommunicator; we only
		   need the lpid comparison for intercommunicators */
		/* Here is the test.  We restrict this test to the
		   process that is the local leader (local_comm_ptr->rank ==
		   local_leader because we can then use peer_comm_ptr->rank
		   to get the rank in peer_comm of the local leader. */
		if (peer_comm_ptr->comm_kind == MPID_INTRACOMM &&
		    local_comm_ptr->rank == local_leader &&
		    peer_comm_ptr->rank == remote_leader) {
		    MPIU_ERR_SET(mpi_errno,MPI_ERR_RANK,"**ranksdistinct");
		}
		if (mpi_errno) goto fn_fail;
	    }
	    MPID_END_ERROR_CHECKS;
	}
#       endif /* HAVE_ERROR_CHECKING */
    }

    /* ... body of routine ... */
    mpi_errno = MPIR_Intercomm_create_impl(local_comm_ptr, local_leader, peer_comm_ptr,
                                           remote_leader, tag, &new_intercomm_ptr);
    if (mpi_errno) goto fn_fail;

    /* Publish the handle only after the intercomm is fully built */
    MPIU_OBJ_PUBLISH_HANDLE(*newintercomm, new_intercomm_ptr->handle);
    /* ... end of body of routine ... */

  fn_exit:
    MPID_MPI_FUNC_EXIT(MPID_STATE_MPI_INTERCOMM_CREATE);
    MPIU_THREAD_CS_EXIT(ALLFUNC,);
    return mpi_errno;

  fn_fail:
    /* --BEGIN ERROR HANDLING-- */
#   ifdef HAVE_ERROR_CHECKING
    {
	mpi_errno = MPIR_Err_create_code(
	    mpi_errno, MPIR_ERR_RECOVERABLE, FCNAME, __LINE__, MPI_ERR_OTHER,
	    "**mpi_intercomm_create",
	    "**mpi_intercomm_create %C %d %C %d %d %p", local_comm,
	    local_leader, peer_comm, remote_leader, tag, newintercomm);
    }
#   endif /* HAVE_ERROR_CHECKING */
    mpi_errno = MPIR_Err_return_comm( local_comm_ptr, FCNAME, mpi_errno );
    goto fn_exit;
    /* --END ERROR HANDLING-- */
}
607