1 /*********************************************************************
2  *
3  *  Copyright (C) 2012, Northwestern University and Argonne National Laboratory
4  *  See COPYRIGHT notice in top-level directory.
5  *
6  *********************************************************************/
7 /* $Id: pnetcdf-read-nb.c 2245 2015-12-20 18:39:52Z wkliao $ */
8 
9 /* simple demonstration of pnetcdf:
10  * knowing nothing about the file, read in the variables.
11  *
12  * This example demonstrates the non-blocking read interface */
13 
14 #include <stdio.h>
15 #include <stdlib.h>
16 #include <mpi.h>
17 #include <pnetcdf.h>
18 
static void handle_error(int status, int lineno)
20 {
21     fprintf(stderr, "Error at line %d: %s\n", lineno, ncmpi_strerror(status));
22     MPI_Abort(MPI_COMM_WORLD, 1);
23 }
24 
int main(int argc, char **argv) {
26 
27     int i, j, rank, nprocs, ret;
    int ncfile, ndims, nvars, ngatts, unlimited, var_ndims, var_natts;
29     MPI_Offset *dim_sizes, var_size, *start, *count;
30     int *requests, *statuses, dimids[NC_MAX_VAR_DIMS], **data;
31     char varname[NC_MAX_NAME+1];
32     nc_type type;
33 
34     MPI_Init(&argc, &argv);
35 
36     MPI_Comm_rank(MPI_COMM_WORLD, &rank);
37     MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
38 
39     if (argc != 2) {
40         if (rank == 0) printf("Usage: %s filename\n", argv[0]);
41         MPI_Finalize();
42         exit(-1);
43     }
44 
45     ret = ncmpi_open(MPI_COMM_WORLD, argv[1], NC_NOWRITE, MPI_INFO_NULL,
46                      &ncfile);
47     if (ret != NC_NOERR) handle_error(ret, __LINE__);
48 
    /* the reader knows nothing about the dataset, but we can interrogate it
     * with query routines: ncmpi_inq tells us how many of each kind of
     * "thing" (dimension, variable, attribute) the file contains */
52 
    /* no communication is needed after ncmpi_open: every process has a
     * cached view of the metadata once ncmpi_open returns */
55 
56     ret = ncmpi_inq(ncfile, &ndims, &nvars, &ngatts, &unlimited);
57     if (ret != NC_NOERR) handle_error(ret, __LINE__);
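
    /* an illustrative addition (not in the original example): rank 0
     * reports what the query found */
    if (rank == 0)
        printf("file has %d dims, %d vars, %d global atts; unlimited dim id: %d\n",
               ndims, nvars, ngatts, unlimited);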
58 
59     /* we do not really need the name of the dimension or the variable for
60      * reading in this example.  we could, in a different example, take the
61      * name of a variable on the command line and read just that one */
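
    /* a minimal sketch of that alternative (assuming a hypothetical second
     * command-line argument naming the variable; shown as a comment, not
     * compiled here):
     *
     *     int varid;
     *     ret = ncmpi_inq_varid(ncfile, argv[2], &varid);
     *     if (ret != NC_NOERR) handle_error(ret, __LINE__);
     */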
62 
63     dim_sizes = (MPI_Offset*) calloc(ndims, sizeof(MPI_Offset));
64     /* netcdf dimension identifiers are allocated sequentially starting
65      * at zero; same for variable identifiers */
66     for(i=0; i<ndims; i++)  {
67         ret = ncmpi_inq_dimlen(ncfile, i, &(dim_sizes[i]) );
68         if (ret != NC_NOERR) handle_error(ret, __LINE__);
69     }
70 
71     requests = (int*) calloc(nvars, sizeof(int));
72     statuses = (int*) calloc(nvars, sizeof(int));
73 
74     data = (int**) calloc(nvars, sizeof(int*));
75 
76     for(i=0; i<nvars; i++) {
        /* much less coordination is needed here than if rank 0 did all the
         * I/O: every process already has the necessary information */
79         ret = ncmpi_inq_var(ncfile, i, varname, &type, &var_ndims, dimids,
80                             &var_natts);
81         if (ret != NC_NOERR) handle_error(ret, __LINE__);
82 
83         start = (MPI_Offset*) calloc(var_ndims, sizeof(MPI_Offset));
84         count = (MPI_Offset*) calloc(var_ndims, sizeof(MPI_Offset));
85 
        /* we will simply decompose along the first dimension.  Generally
         * the application has some algorithm for domain decomposition.
         * Note that the data decomposition can have an impact on I/O
         * performance.  Often it is best just to do what is natural for
         * the application, but it is worth revisiting if performance is
         * not what was expected/desired */
92 
93         start[0] = (dim_sizes[dimids[0]]/nprocs)*rank;
94         count[0] = (dim_sizes[dimids[0]]/nprocs);
95         var_size = count[0];
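
        /* note: this simple split assumes dim_sizes[dimids[0]] divides
         * evenly by nprocs; any remainder rows are silently skipped.  a
         * common fix (a sketch, not used here) gives the low ranks one
         * extra row each:
         *
         *     MPI_Offset len = dim_sizes[dimids[0]];
         *     MPI_Offset rem = len % nprocs;
         *     count[0] = len / nprocs + (rank < rem ? 1 : 0);
         *     start[0] = (len / nprocs) * rank + (rank < rem ? rank : rem);
         */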
96 
97         for (j=1; j<var_ndims; j++) {
98             start[j] = 0;
99             count[j] = dim_sizes[dimids[j]];
100             var_size *= count[j];
101         }
102 
103         switch(type) {
104             case NC_INT:
105                 data[i] = (int*) calloc(var_size, sizeof(int));
106                 /* as with the writes, this call is independent: we
107                  * will do any coordination (if desired) in a
108                  * subsequent ncmpi_wait_all() call */
109                 ret = ncmpi_iget_vara(ncfile, i, start, count, data[i],
110                                       var_size, MPI_INT, &requests[i]);
111                 if (ret != NC_NOERR) handle_error(ret, __LINE__);
112                 break;
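
            /* other numeric types follow the same pattern; e.g. a sketch of
             * an NC_DOUBLE case (it would need its own double* buffer, a
             * hypothetical "dbl", rather than the int* array used here):
             *
             *     case NC_DOUBLE:
             *         dbl = (double*) calloc(var_size, sizeof(double));
             *         ret = ncmpi_iget_vara(ncfile, i, start, count, dbl,
             *                               var_size, MPI_DOUBLE, &requests[i]);
             *         if (ret != NC_NOERR) handle_error(ret, __LINE__);
             *         break;
             */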
            default:
                /* we could do this for all the known netcdf types, but this
                 * example is already getting too long */
                fprintf(stderr, "unsupported NetCDF type\n");
                /* calloc left requests[i] at 0, which ncmpi_wait_all would
                 * treat as a real request id; mark the slot as a no-op */
                requests[i] = NC_REQ_NULL;
117         }
118 
119         free(start);
120         free(count);
121     }
122 
123     ret = ncmpi_wait_all(ncfile, nvars, requests, statuses);
124     if (ret != NC_NOERR) handle_error(ret, __LINE__);
125 
126     /* check status of each nonblocking call */
127     for (i=0; i<nvars; i++)
128         if (statuses[i] != NC_NOERR) handle_error(statuses[i], __LINE__);
129 
    /* now that ncmpi_wait_all has returned, the caller is free to use the
     * buffers passed to the non-blocking operations.  The buffer-reuse
     * rules are similar to those for MPI non-blocking messages */
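
    /* for instance (a sketch, not part of the original example), rank 0
     * could now peek at the first element of each integer variable:
     *
     *     for (i=0; i<nvars; i++)
     *         if (rank == 0 && data[i] != NULL)
     *             printf("var %d: first local value = %d\n", i, data[i][0]);
     */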
133 
134     for (i=0; i<nvars; i++) {
135         if (data[i] != NULL) free(data[i]);
136     }
137     free(data);
138     free(dim_sizes);
139     free(requests);
140     free(statuses);
141 
142     ret = ncmpi_close(ncfile);
143     if (ret != NC_NOERR) handle_error(ret, __LINE__);
144 
145     MPI_Finalize();
146     return 0;
147 }
148 
149 /*
150  *vim: ts=8 sts=4 sw=4 noexpandtab */
151