/* This is a benchmarking program for netCDF-4 parallel I/O. */

/* Defining USE_MPE causes the MPE trace library to be used (and you
 * must also relink with -llmpe -lmpe). This causes clog2 output to be
 * written, which can be converted to slog2 (by the program
 * clog2TOslog2) and then used in the analysis program jumpshot. */
/*#define USE_MPE 1*/

#include <nc_tests.h>
#include "err_macros.h"
#include <mpi.h>
#ifdef USE_PNETCDF
#include <pnetcdf.h>
#endif /* USE_PNETCDF */

#ifdef USE_MPE
#include <mpe.h>
#include <unistd.h> /* for sleep() */
#endif /* USE_MPE */

#undef DEBUG

#define FILE_NAME "tst_parallel2.nc"
#define NDIMS 3
#define DIMSIZE 8
#define NUM_SLABS 8
#define DIM1_NAME "slab"
#define DIM2_NAME "x"
#define DIM3_NAME "y"
#define VAR_NAME "Bond_James_Bond"
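
/* The file holds a single NUM_SLABS x DIMSIZE x DIMSIZE variable of
 * ints. Each process writes, and later reads back, a contiguous block
 * of NUM_SLABS / mpi_size slabs, which is why NUM_SLABS must divide
 * evenly among the processes. */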

int
main(int argc, char **argv)
{
    /* MPI stuff. */
    int mpi_namelen;
    char mpi_name[MPI_MAX_PROCESSOR_NAME];
    int mpi_size, mpi_rank;
    MPI_Comm comm = MPI_COMM_WORLD;
    MPI_Info info = MPI_INFO_NULL;
    double start_time = 0, total_time;

    /* Netcdf-4 stuff. */
    int ncid, varid, dimids[NDIMS];
    size_t start[NDIMS] = {0, 0, 0};
    size_t count[NDIMS] = {1, DIMSIZE, DIMSIZE};
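    /* Each put/get call moves one full DIMSIZE x DIMSIZE slab: count
     * fixes the slab shape, and only start[0] (the slab index) changes
     * between calls. */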
    int data[DIMSIZE * DIMSIZE], data_in[DIMSIZE * DIMSIZE];
    int j, i;

    char file_name[NC_MAX_NAME + 1];
    int ndims_in, nvars_in, natts_in, unlimdimid_in;

#ifdef USE_MPE
    int s_init, e_init, s_define, e_define, s_write, e_write, s_close, e_close;
    int s_open, e_open, s_read, e_read;
#endif /* USE_MPE */

    /* Initialize MPI. */
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
    MPI_Get_processor_name(mpi_name, &mpi_namelen);
#ifdef DEBUG
    printf("mpi_name: %s size: %d rank: %d\n", mpi_name, mpi_size, mpi_rank);
#endif

    /* Must be able to evenly divide my slabs between processors. */
    if (NUM_SLABS % mpi_size != 0)
    {
        if (!mpi_rank) printf("NUM_SLABS (%d) is not evenly divisible by mpi_size (%d)\n",
                              NUM_SLABS, mpi_size);
        ERR;
    }

#ifdef USE_MPE
    MPE_Init_log();
    s_init = MPE_Log_get_event_number();
    e_init = MPE_Log_get_event_number();
    s_define = MPE_Log_get_event_number();
    e_define = MPE_Log_get_event_number();
    s_write = MPE_Log_get_event_number();
    e_write = MPE_Log_get_event_number();
    s_close = MPE_Log_get_event_number();
    e_close = MPE_Log_get_event_number();
    s_open = MPE_Log_get_event_number();
    e_open = MPE_Log_get_event_number();
    s_read = MPE_Log_get_event_number();
    e_read = MPE_Log_get_event_number();
    MPE_Describe_state(s_init, e_init, "Init", "red");
    MPE_Describe_state(s_define, e_define, "Define", "yellow");
    MPE_Describe_state(s_write, e_write, "Write", "green");
    MPE_Describe_state(s_close, e_close, "Close", "purple");
    MPE_Describe_state(s_open, e_open, "Open", "blue");
    MPE_Describe_state(s_read, e_read, "Read", "orange");
    MPE_Start_log();
    MPE_Log_event(s_init, 0, "start init");
#endif /* USE_MPE */

#ifdef DEBUG
    if (!mpi_rank)
    {
        printf("\n*** Testing parallel I/O some more.\n");
        printf("*** writing a %d x %d x %d file from %d processors...\n",
               NUM_SLABS, DIMSIZE, DIMSIZE, mpi_size);
    }
#endif

    /* We will write the same slab over and over. */
    for (i = 0; i < DIMSIZE * DIMSIZE; i++)
        data[i] = mpi_rank;

#ifdef USE_MPE
    MPE_Log_event(e_init, 0, "end init");
    MPE_Log_event(s_define, 0, "start define file");
#endif /* USE_MPE */

    /* Create a parallel netcdf-4 file. */
    snprintf(file_name, sizeof(file_name), "%s/%s", TEMP_LARGE, FILE_NAME);
#ifdef DEBUG
    fprintf(stderr, "create: file_name=%s\n", file_name);
#endif
    if (nc_create_par(file_name, 0, comm, info, &ncid)) ERR;
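    /* A cmode of 0 sets no format flags, leaving the on-disk format
     * and parallel I/O driver to the defaults of this netCDF build. */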

    /* A global attribute holds the number of processors that created
     * the file. */
    if (nc_put_att_int(ncid, NC_GLOBAL, "num_processors", NC_INT, 1, &mpi_size)) ERR;

    /* Create three dimensions. */
    if (nc_def_dim(ncid, DIM1_NAME, NUM_SLABS, dimids)) ERR;
    if (nc_def_dim(ncid, DIM2_NAME, DIMSIZE, &dimids[1])) ERR;
    if (nc_def_dim(ncid, DIM3_NAME, DIMSIZE, &dimids[2])) ERR;

    /* Create one var. */
    if (nc_def_var(ncid, VAR_NAME, NC_INT, NDIMS, dimids, &varid)) ERR;

    /* Write metadata to file. */
    if (nc_enddef(ncid)) ERR;

#ifdef USE_MPE
    MPE_Log_event(e_define, 0, "end define file");
    if (mpi_rank)
        sleep(mpi_rank);
#endif /* USE_MPE */

#ifdef USE_PNETCDF
    /* if (nc_var_par_access(ncid, NC_GLOBAL, NC_COLLECTIVE)) ERR;*/
    if (nc_var_par_access(ncid, NC_GLOBAL, NC_INDEPENDENT)) ERR;
#else
    /* if (nc_var_par_access(ncid, varid, NC_COLLECTIVE)) ERR;*/
    if (nc_var_par_access(ncid, varid, NC_INDEPENDENT)) ERR;
#endif
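    /* NC_INDEPENDENT lets each process issue its data calls without
     * coordinating with the others. NC_COLLECTIVE (commented out
     * above) would require all processes to participate in every
     * access, which allows the MPI-IO layer to apply collective
     * optimizations. */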

    if (!mpi_rank)
        start_time = MPI_Wtime();

    /* Write all the slabs this process is responsible for. */
    for (i = 0; i < NUM_SLABS / mpi_size; i++)
    {
        start[0] = NUM_SLABS / mpi_size * mpi_rank + i;
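        /* Rank r therefore owns the contiguous slabs
         * [r * NUM_SLABS / mpi_size, (r + 1) * NUM_SLABS / mpi_size). */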

#ifdef USE_MPE
        MPE_Log_event(s_write, 0, "start write slab");
#endif /* USE_MPE */

        /* Write one slab of data. */
        if (nc_put_vara_int(ncid, varid, start, count, data)) ERR;

#ifdef USE_MPE
        MPE_Log_event(e_write, 0, "end write slab");
#endif /* USE_MPE */
    }

    if (!mpi_rank)
    {
        total_time = MPI_Wtime() - start_time;
        /* Columns: number of processes, write time (s), write rate (bytes/s). */
        printf("%d\t%g\t%g\n", mpi_size, total_time,
               DIMSIZE * DIMSIZE * NUM_SLABS * sizeof(int) / total_time);
    }
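
    /* Note that the rate above counts only the variable's data bytes
     * (DIMSIZE * DIMSIZE * NUM_SLABS ints) and is timed on rank 0
     * alone, so it excludes metadata I/O and any skew between ranks. */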

#ifdef USE_MPE
    MPE_Log_event(s_close, 0, "start close file");
#endif /* USE_MPE */

    /* Close the netcdf file. */
    if (nc_close(ncid)) ERR;

#ifdef USE_MPE
    MPE_Log_event(e_close, 0, "end close file");
#endif /* USE_MPE */

    /* Reopen the file and check it. */
#ifdef DEBUG
    fprintf(stderr, "open: file_name=%s\n", file_name);
#endif
    if (nc_open_par(file_name, NC_NOWRITE, comm, info, &ncid)) ERR;
    if (nc_inq(ncid, &ndims_in, &nvars_in, &natts_in, &unlimdimid_in)) ERR;
    if (ndims_in != NDIMS || nvars_in != 1 || natts_in != 1 ||
        unlimdimid_in != -1) ERR;

    /* Read all the slabs this process is responsible for. */
    for (i = 0; i < NUM_SLABS / mpi_size; i++)
    {
        start[0] = NUM_SLABS / mpi_size * mpi_rank + i;

#ifdef USE_MPE
        MPE_Log_event(s_read, 0, "start read slab");
#endif /* USE_MPE */

        /* Read one slab of data. */
        if (nc_get_vara_int(ncid, varid, start, count, data_in)) ERR;

        /* Check data. */
        for (j = 0; j < DIMSIZE * DIMSIZE; j++)
            if (data_in[j] != mpi_rank)
            {
                ERR;
                break;
            }

#ifdef USE_MPE
        MPE_Log_event(e_read, 0, "end read slab");
#endif /* USE_MPE */
    }

#ifdef USE_MPE
    MPE_Log_event(s_close, 0, "start close file");
#endif /* USE_MPE */

    /* Close the netcdf file. */
    if (nc_close(ncid)) ERR;

#ifdef USE_MPE
    MPE_Log_event(e_close, 0, "end close file");
#endif /* USE_MPE */

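    /* Make sure every rank has closed the file before rank 0 deletes
     * it. */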
    MPI_Barrier(MPI_COMM_WORLD);
    if (mpi_rank == 0)
        remove(file_name);

    /* Shut down MPI. */
    MPI_Finalize();

#ifdef DEBUG
    if (!mpi_rank)
    {
        SUMMARIZE_ERR;
        FINAL_RESULTS;
    }
#endif

    return total_err;
}