/*
   (C) 2001 by Argonne National Laboratory.
       See COPYRIGHT in top-level directory.
*/
#include "mpe_logging_conf.h"

#if defined( HAVE_STDIO_H ) || defined( STDC_HEADERS )
#include <stdio.h>
#endif
#if defined( STDC_HEADERS ) || defined( HAVE_STRING_H )
#include <string.h>
#endif
#if defined( STDC_HEADERS ) || defined( HAVE_STDLIB_H )
#include <stdlib.h>
#endif
#if defined( HAVE_UNISTD_H )
#include <unistd.h>
#endif

#include "clog_mem.h"
#include "clog_const.h"
#include "mpe_callstack.h"
#include "mpi_null.h"

#if defined( NEEDS_GETHOSTNAME_DECL )
int gethostname(char *name, size_t len);
#endif

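/*
   Dump the current call stack.  The body is wrapped in do { ... } while (0)
   so the macro expands to a single statement and composes safely with an
   if/else that lacks braces.
*/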
#define ZMPI_PRINTSTACK() \
        do { \
            MPE_CallStack_t  cstk; \
            MPE_CallStack_init( &cstk ); \
            MPE_CallStack_fancyprint( &cstk, 2, \
                                      "\t", 1, MPE_CALLSTACK_UNLIMITED ); \
        } while (0)

static const int ZMPI_BOOL_FALSE = CLOG_BOOL_FALSE;
static const int ZMPI_BOOL_TRUE  = CLOG_BOOL_TRUE;

#define MPE_COMM_MAX        5
#define MPE_COMM_KEY_MAX   10

typedef struct {
    int   size;
    int   rank;
    void *attrs[MPE_COMM_KEY_MAX];
    int   is_attr_used[MPE_COMM_KEY_MAX];
} ZMPI_Comm_t;

/*
    In this serial MPI implementation, MPI_Comm is defined as an int that
    serves as the index into the ZMPI_COMMSET->commptrs[] array.
*/
typedef struct {
    ZMPI_Comm_t  *commptrs[ MPE_COMM_MAX ];
    char          name[ MPI_MAX_PROCESSOR_NAME ];
    int           namelen;
    int           is_key_used[ MPE_COMM_KEY_MAX ];
} ZMPI_CommSet_t;
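
/*
   Usage sketch (hypothetical, assuming mpi_null.h defines MPI_COMM_WORLD
   and MPI_COMM_SELF as small non-negative ints, e.g. 0 and 1):

       MPI_Comm     comm    = MPI_COMM_WORLD;           // just an int index
       ZMPI_Comm_t *commptr = ZMPI_COMMSET->commptrs[comm];
       // commptr->size == 1 and commptr->rank == 0 in this serial build.
*/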

static ZMPI_CommSet_t*  ZMPI_COMMSET = NULL;

int MPI_WTIME_IS_GLOBAL;

int MPI_WTIME_IS_GLOBAL_VALUE;

static ZMPI_Comm_t*  ZMPI_Comm_create( void )
{
    ZMPI_Comm_t  *commptr;
    int           idx;

    commptr = (ZMPI_Comm_t *) MALLOC( sizeof(ZMPI_Comm_t) );
    if ( commptr == NULL ) {
        fprintf( stderr, __FILE__":ZMPI_Comm_create() -- MALLOC fails.\n" );
        fflush( stderr );
        return NULL;
    }
    commptr->size = 1;
    commptr->rank = 0;

    for ( idx = 0; idx < MPE_COMM_KEY_MAX; idx++ ) {
        commptr->attrs[idx]  = NULL;
        commptr->is_attr_used[idx] = ZMPI_BOOL_FALSE;
    }

    return commptr;
}

static void ZMPI_Comm_free( ZMPI_Comm_t **comm_handle )
{
    ZMPI_Comm_t  *commptr;

    commptr = *comm_handle;
    if ( commptr != NULL )
        FREE( commptr );
    *comm_handle = NULL;
}

int PMPI_Init( int *argc, char ***argv )
{
    int  *extra_state = NULL; /* placeholder for the unused extra_state arg */
    int   idx;

    ZMPI_COMMSET = (ZMPI_CommSet_t *) MALLOC( sizeof(ZMPI_CommSet_t) );
    if ( ZMPI_COMMSET == NULL ) {
        fprintf( stderr, __FILE__":PMPI_Init() -- MALLOC fails.\n" );
        fflush( stderr );
        return MPI_ERR_NO_MEM;
    }

    if ( gethostname( ZMPI_COMMSET->name, MPI_MAX_PROCESSOR_NAME ) != 0 ) {
        /*
           If gethostname() fails, write a NUL character at the end of
           the name[] array so that the subsequent strlen() succeeds.
        */
        ZMPI_COMMSET->name[MPI_MAX_PROCESSOR_NAME-1] = '\0';
    }
    ZMPI_COMMSET->namelen = strlen( ZMPI_COMMSET->name );

    ZMPI_COMMSET->commptrs[MPI_COMM_WORLD] = ZMPI_Comm_create();
    if ( ZMPI_COMMSET->commptrs[MPI_COMM_WORLD] == NULL ) {
        fprintf( stderr, __FILE__":PMPI_Init() -- "
                         "ZMPI_Comm_create() for MPI_COMM_WORLD fails.\n" );
        fflush( stderr );
        PMPI_Abort( MPI_COMM_WORLD, 1 );
        return MPI_ERR_NO_MEM;
    }
    ZMPI_COMMSET->commptrs[MPI_COMM_SELF]  = ZMPI_Comm_create();
    if ( ZMPI_COMMSET->commptrs[MPI_COMM_SELF] == NULL ) {
        fprintf( stderr, __FILE__":PMPI_Init() -- "
                         "ZMPI_Comm_create() for MPI_COMM_SELF fails.\n" );
        fflush( stderr );
        PMPI_Abort( MPI_COMM_WORLD, 1 );
        return MPI_ERR_NO_MEM;
    }

    for ( idx = MPI_COMM_SELF+1; idx < MPE_COMM_MAX; idx++ )
        ZMPI_COMMSET->commptrs[idx] = NULL;

    /* is_key_used[] has MPE_COMM_KEY_MAX entries, not MPE_COMM_MAX */
    for ( idx = 0; idx < MPE_COMM_KEY_MAX; idx++ )
        ZMPI_COMMSET->is_key_used[idx] = ZMPI_BOOL_FALSE;

    /*
       Create a keyval for MPI_WTIME_IS_GLOBAL and set the attribute to
       true, since there is only one process in this MPI implementation.
       This is done so that CLOG_Util_is_MPIWtime_synchronized(), which
       queries this attribute, returns TRUE.
    */
    PMPI_Comm_create_keyval( MPI_COMM_NULL_COPY_FN,
                             MPI_COMM_NULL_DELETE_FN,
                             &MPI_WTIME_IS_GLOBAL, extra_state );
    MPI_WTIME_IS_GLOBAL_VALUE = CLOG_BOOL_TRUE;
    PMPI_Comm_set_attr( MPI_COMM_WORLD, MPI_WTIME_IS_GLOBAL,
                        &MPI_WTIME_IS_GLOBAL_VALUE );
    PMPI_Comm_set_attr( MPI_COMM_SELF, MPI_WTIME_IS_GLOBAL,
                        &MPI_WTIME_IS_GLOBAL_VALUE );

    return MPI_SUCCESS;
}
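
/*
   Usage sketch (hypothetical caller): after PMPI_Init(), the clock-sync
   attribute can be queried the way CLOG_Util_is_MPIWtime_synchronized()
   is expected to do it:

       int  flag;
       int *is_global;
       PMPI_Comm_get_attr( MPI_COMM_WORLD, MPI_WTIME_IS_GLOBAL,
                           &is_global, &flag );
       if ( flag && *is_global )
           ;   // clocks are "synchronized" -- trivially true with 1 process
*/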

int PMPI_Finalize( void )
{
    int idx;

    if ( ZMPI_COMMSET != NULL ) {
        for ( idx = 0; idx < MPE_COMM_MAX; idx++ )
            ZMPI_Comm_free( &(ZMPI_COMMSET->commptrs[idx]) );
        FREE( ZMPI_COMMSET );
        ZMPI_COMMSET = NULL;
    }

    return MPI_SUCCESS;
}

int PMPI_Abort( MPI_Comm comm, int errorcode )
{
    fprintf( stdout, __FILE__":PMPI_Abort( MPI_Comm=%d, errorcode=%d) -- "
                     "Aborting...\n", comm, errorcode );
    ZMPI_PRINTSTACK();
    PMPI_Finalize();
    exit( 1 );
    return MPI_SUCCESS;
}

int PMPI_Initialized( int *flag )
{
    *flag = ( ZMPI_COMMSET != NULL );
    return MPI_SUCCESS;
}

int PMPI_Get_processor_name( char *name, int *resultlen )
{
    strncpy( name, ZMPI_COMMSET->name, MPI_MAX_PROCESSOR_NAME );
    *resultlen = strlen( name );
    return MPI_SUCCESS;
}

int PMPI_Comm_size( MPI_Comm comm, int *size )
{
    *size = ZMPI_COMMSET->commptrs[comm]->size;
    return MPI_SUCCESS;
}

int PMPI_Comm_rank( MPI_Comm comm, int *rank )
{
    *rank = ZMPI_COMMSET->commptrs[comm]->rank;
    return MPI_SUCCESS;
}

/*
    Assume gettimeofday() exists; configure should probably check for it.
*/
#if defined( HAVE_SYS_TIME_H )
#include <sys/time.h>
#endif
double PMPI_Wtime( void )
{
    struct timeval  tval;
    gettimeofday( &tval, NULL );
    return ( (double) tval.tv_sec + (double) tval.tv_usec * 0.000001 );
}
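
/*
   Usage sketch (hypothetical workload): PMPI_Wtime() returns seconds since
   the Unix epoch as a double, so elapsed time is measured by differencing:

       double t0 = PMPI_Wtime();
       do_work();                            // hypothetical workload
       double elapsed = PMPI_Wtime() - t0;   // seconds, microsecond ticks
*/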

int PMPI_Comm_create_keyval( MPI_Comm_copy_attr_function *comm_copy_attr_fn,
                             MPI_Comm_delete_attr_function *comm_delete_attr_fn,
                             int *comm_keyval, void *extra_state )
{
    int  avail_key;

    for ( avail_key = 0; avail_key < MPE_COMM_KEY_MAX; avail_key++ ) {
        if ( ZMPI_COMMSET->is_key_used[avail_key] == ZMPI_BOOL_FALSE )
            break;
    }

    if ( avail_key < MPE_COMM_KEY_MAX ) {
        ZMPI_COMMSET->is_key_used[avail_key] = ZMPI_BOOL_TRUE;
        *comm_keyval = avail_key;
        return MPI_SUCCESS;
    }
    else {
        fprintf( stderr, __FILE__":PMPI_Comm_create_keyval() -- "
                         "Exceeded internal keyval[] size.\n" );
        fflush( stderr );
        PMPI_Abort( MPI_COMM_WORLD, 1 );
        return MPI_ERR_KEYVAL;
    }
}

int PMPI_Comm_free_keyval( int *comm_keyval )
{
    if ( *comm_keyval >= 0 && *comm_keyval < MPE_COMM_KEY_MAX ) {
        ZMPI_COMMSET->is_key_used[ *comm_keyval ] = ZMPI_BOOL_FALSE;
        *comm_keyval = MPI_KEYVAL_INVALID;
        return MPI_SUCCESS;
    }
    else {
        fprintf( stderr, __FILE__":PMPI_Comm_free_keyval() -- "
                         "Invalid comm_keyval, %d.\n", *comm_keyval );
        fflush( stderr );
        PMPI_Abort( MPI_COMM_WORLD, 1 );
        return MPI_ERR_KEYVAL;
    }
}

int PMPI_Comm_set_attr( MPI_Comm comm, int comm_keyval,
                        void *attribute_val )
{
    if ( comm_keyval >= 0 && comm_keyval < MPE_COMM_KEY_MAX ) {
        ZMPI_COMMSET->commptrs[comm]->attrs[comm_keyval] = attribute_val;
        return MPI_SUCCESS;
    }
    else {
        fprintf( stderr, __FILE__":PMPI_Comm_set_attr(MPI_Comm=%d) -- "
                         "Invalid comm_keyval, %d.\n", comm, comm_keyval );
        fflush( stderr );
        PMPI_Abort( comm, 1 );
        return MPI_ERR_KEYVAL;
    }
}

int PMPI_Comm_get_attr( MPI_Comm comm, int comm_keyval,
                        void *attribute_val, int *flag )
{
    if ( comm_keyval >= 0 && comm_keyval < MPE_COMM_KEY_MAX ) {
        *(void **) attribute_val =
            ZMPI_COMMSET->commptrs[comm]->attrs[comm_keyval];
        *flag = ZMPI_BOOL_TRUE;
    }
    else {
        *flag = ZMPI_BOOL_FALSE;
        fprintf( stderr, __FILE__":PMPI_Comm_get_attr(MPI_Comm=%d) -- "
                         "Invalid comm_keyval, %d.\n", comm, comm_keyval );
        fflush( stderr );
    }
    return MPI_SUCCESS;
}
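
/*
   Keyval lifecycle sketch (hypothetical values): a keyval is just an index
   into is_key_used[], and attributes are stored per communicator by pointer:

       int   keyval;
       int   value = 42;                 // hypothetical attribute value
       int  *fetched;
       int   flag;
       PMPI_Comm_create_keyval( MPI_COMM_NULL_COPY_FN,
                                MPI_COMM_NULL_DELETE_FN, &keyval, NULL );
       PMPI_Comm_set_attr( MPI_COMM_WORLD, keyval, &value );
       PMPI_Comm_get_attr( MPI_COMM_WORLD, keyval, &fetched, &flag );
       PMPI_Comm_free_keyval( &keyval );  // keyval -> MPI_KEYVAL_INVALID
*/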

int PMPI_Comm_test_inter( MPI_Comm comm, int *flag )
{
    *flag = ZMPI_BOOL_FALSE;
    return MPI_SUCCESS;
}

int PMPI_Ssend( void *buf, int count, MPI_Datatype datatype, int dest,
                int tag, MPI_Comm comm )
{
    fprintf( stderr, __FILE__":PMPI_Ssend() should not be invoked!\n" );
    ZMPI_PRINTSTACK();
    return MPI_SUCCESS;
}

int PMPI_Send( void *buf, int count, MPI_Datatype datatype, int dest,
               int tag, MPI_Comm comm )
{
    fprintf( stderr, __FILE__":PMPI_Send() should not be invoked!\n" );
    ZMPI_PRINTSTACK();
    return MPI_SUCCESS;
}

int PMPI_Recv( void *buf, int count, MPI_Datatype datatype, int source,
               int tag, MPI_Comm comm, MPI_Status *status )
{
    fprintf( stderr, __FILE__":PMPI_Recv() should not be invoked!\n" );
    ZMPI_PRINTSTACK();
    return MPI_SUCCESS;
}

int PMPI_Irecv( void *buf, int count, MPI_Datatype datatype, int source,
                int tag, MPI_Comm comm, MPI_Request *request )
{
    fprintf( stderr, __FILE__":PMPI_Irecv() should not be invoked!\n" );
    ZMPI_PRINTSTACK();
    return MPI_SUCCESS;
}

int PMPI_Wait( MPI_Request *request, MPI_Status *status )
{
    fprintf( stderr, __FILE__":PMPI_Wait() should not be invoked!\n" );
    ZMPI_PRINTSTACK();
    return MPI_SUCCESS;
}
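
/*
   Note: in this null MPI, an MPI_Datatype is presumably defined (in
   mpi_null.h) as the datatype's size in bytes, and status->count holds a
   byte count; hence count = status->count / datatype below.  Sketch of
   the arithmetic with hypothetical values:

       status->count = 40;           // 40 bytes received
       datatype      = 8;            // e.g. an 8-byte double
       *count        = 40 / 8 = 5;   // 5 elements
*/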

int PMPI_Get_count( MPI_Status *status, MPI_Datatype datatype, int *count )
{
    if ( status != NULL ) {
        if ( (status->count % datatype) != 0 )
            (*count) = MPI_UNDEFINED;
        else
            (*count) = status->count / datatype;
    }
    else {
        *count = MPI_UNDEFINED;
    }
    return MPI_SUCCESS;
}

int PMPI_Barrier( MPI_Comm comm )
{
    return MPI_SUCCESS;
}

int PMPI_Bcast( void *buffer, int count, MPI_Datatype datatype,
                int root, MPI_Comm comm )
{
    return MPI_SUCCESS;
}

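/*
   With a single process, the collectives below reduce to copying sendbuf
   into recvbuf; the byte count is computed as count*datatype under the
   same datatype-size-in-bytes convention assumed above.  Usage sketch
   (hypothetical values; the MPI_Op argument is ignored here):

       double in = 3.0, out = 0.0;
       PMPI_Allreduce( &in, &out, 1, sizeof(double), MPI_SUM, MPI_COMM_WORLD );
       // out == 3.0 : the "reduction" over one process is an identity copy.
*/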
int PMPI_Scatter( void *sendbuf, int sendcnt, MPI_Datatype sendtype,
                  void *recvbuf, int recvcnt, MPI_Datatype recvtype,
                  int root, MPI_Comm comm )
{
    if ( sendbuf != recvbuf )
        memcpy( recvbuf, sendbuf, sendcnt*sendtype );
    return MPI_SUCCESS;
}

int PMPI_Gather( void *sendbuf, int sendcnt, MPI_Datatype sendtype,
                 void *recvbuf, int recvcnt, MPI_Datatype recvtype,
                 int root, MPI_Comm comm )
{
    if ( sendbuf != recvbuf )
        memcpy( recvbuf, sendbuf, sendcnt*sendtype );
    return MPI_SUCCESS;
}

int PMPI_Scan( void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,
               MPI_Op op, MPI_Comm comm )
{
    if ( sendbuf != recvbuf )
        memcpy( recvbuf, sendbuf, count*datatype );
    return MPI_SUCCESS;
}

int PMPI_Allreduce( void *sendbuf, void *recvbuf, int count,
                    MPI_Datatype datatype, MPI_Op op, MPI_Comm comm )
{
    if ( sendbuf != recvbuf )
        memcpy( recvbuf, sendbuf, count*datatype );
    return MPI_SUCCESS;
}