/*
   (C) 2001 by Argonne National Laboratory.
       See COPYRIGHT in top-level directory.
*/
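
/*
   This test drives nonblocking/blocking point-to-point traffic over
   several kinds of communicators: a ring exchange over MPI_COMM_WORLD,
   the same exchange over the intracommunicators produced by
   MPI_Comm_split (and MPI_Comm_dup), and finally an exchange between
   the two group leaders over an MPI_Intercomm_create intercommunicator.
*/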
#include <stdio.h>
#include <string.h>
#include "mpi.h"

#define REQUESTS_SIZE  10
#define STATUSES_SIZE  10
#define LOOP_COUNT     200

int main( int argc, char *argv[] )
{
    MPI_Comm     split_comm, duped_comm, inter_comm, *comm_ptr;
    MPI_Request  world_requests[REQUESTS_SIZE], comm_requests[REQUESTS_SIZE];
    MPI_Status   world_statuses[STATUSES_SIZE], comm_statuses[STATUSES_SIZE];
    char         processor_name[MPI_MAX_PROCESSOR_NAME];
    int          comm_rank, comm_size, comm_neighbor;
    int          world_rank, world_size, world_neighbor;
    int          icolor, namelen, ibuffers[REQUESTS_SIZE];
    int          ii, jj;

    MPI_Init( &argc, &argv );
    MPI_Comm_size( MPI_COMM_WORLD, &world_size );
    MPI_Comm_rank( MPI_COMM_WORLD, &world_rank );
    MPI_Get_processor_name( processor_name, &namelen );

    fprintf( stdout, "world_rank %d on %s\n", world_rank, processor_name );
    fflush( stdout );

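    /* Added guard (not in the original test): MPI_Intercomm_create below
       uses world ranks 0 and 1 as the two group leaders, so at least
       2 processes are required. */
    if ( world_size < 2 ) {
        fprintf( stderr, "This test requires at least 2 processes\n" );
        fflush( stderr );
        MPI_Abort( MPI_COMM_WORLD, 1 );
    }

    /* World ring: each rank sends to the next higher rank, and the
       last rank wraps around to rank 0. */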
    if ( world_rank == world_size - 1 )
        world_neighbor = 0;
    else
        world_neighbor = world_rank + 1;

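    /* Each iteration posts the nonblocking receive before the matching
       blocking send, so every rank already has a receive outstanding by
       the time it blocks in MPI_Send; the ring exchange cannot deadlock. */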
    for ( ii = 0; ii < LOOP_COUNT; ii++ ) {
        for ( jj = 0; jj < REQUESTS_SIZE; jj++ ) {
            MPI_Irecv( &ibuffers[jj], 1, MPI_INT, MPI_ANY_SOURCE,
                       99, MPI_COMM_WORLD, &world_requests[jj] );
            MPI_Send( &world_rank, 1, MPI_INT, world_neighbor,
                      99, MPI_COMM_WORLD );
        }
        MPI_Waitall( REQUESTS_SIZE, world_requests, world_statuses );
    }

    /* Split all processes into 2 separate intracommunicators */
    icolor = world_rank % 2;
    MPI_Comm_split( MPI_COMM_WORLD, icolor, world_rank, &split_comm );
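    /* Even world ranks land in color 0, odd ranks in color 1; world_rank
       as the sort key preserves the relative rank order in each half. */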

    /* Add an MPI_Comm_dup so the local communicator ID differs
       between the two split communicators */
    if ( icolor == 0 ) {
        MPI_Comm_dup( split_comm, &duped_comm );
        comm_ptr  = &duped_comm;
    }
    else
        comm_ptr  = &split_comm;

    MPI_Comm_size( *comm_ptr, &comm_size );
    MPI_Comm_rank( *comm_ptr, &comm_rank );

    if ( comm_rank == comm_size - 1 )
        comm_neighbor = 0;
    else
        comm_neighbor = comm_rank + 1;

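    /* Repeat the same ring exchange, now inside each half over the split
       (or, for color 0, duplicated) communicator and with a new tag. */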
    for ( ii = 0; ii < LOOP_COUNT; ii++ ) {
        for ( jj = 0; jj < REQUESTS_SIZE; jj++ ) {
            MPI_Irecv( &ibuffers[jj], 1, MPI_INT, MPI_ANY_SOURCE,
                       999, *comm_ptr, &comm_requests[jj] );
            MPI_Send( &comm_rank, 1, MPI_INT, comm_neighbor,
                      999, *comm_ptr );
        }
        MPI_Waitall( REQUESTS_SIZE, comm_requests, comm_statuses );
    }

    /* Form an intercommunicator between the two split intracommunicators.
       Rank 0 of each half is the local leader; its peer in MPI_COMM_WORLD
       is world rank 1 for color 0 and world rank 0 for color 1. */
    if ( icolor == 0 )
        MPI_Intercomm_create( *comm_ptr, 0, MPI_COMM_WORLD, 1,
                              9090, &inter_comm );
    else
        MPI_Intercomm_create( *comm_ptr, 0, MPI_COMM_WORLD, 0,
                              9090, &inter_comm );

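    /* Only the local leaders (comm_rank 0 in each group) talk over the
       intercommunicator; destination/source rank 0 here addresses rank 0
       of the remote group, as intercommunicator point-to-point ranks do. */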
    if ( comm_rank == 0 ) {
        for ( ii = 0; ii < LOOP_COUNT; ii++ ) {
            for ( jj = 0; jj < REQUESTS_SIZE; jj++ ) {
                MPI_Irecv( &ibuffers[jj], 1, MPI_INT, 0,
                           9999, inter_comm, &comm_requests[jj] );
                MPI_Send( &comm_rank, 1, MPI_INT, 0, 9999, inter_comm );
            }
            MPI_Waitall( REQUESTS_SIZE, comm_requests, comm_statuses );
        }
    }


    /* Free all communicators created */
    MPI_Comm_free( &inter_comm );
    if ( icolor == 0 )
        MPI_Comm_free( &duped_comm );
    MPI_Comm_free( &split_comm );

    MPI_Finalize();
    return( 0 );
}