/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
 * Copyright by the Board of Trustees of the University of Illinois.         *
 * All rights reserved.                                                      *
 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* This test of netCDF-4 parallel I/O was contributed by the HDF5
 * team. */

#include <nc_tests.h>
#include "err_macros.h"
#define FILE_NAME "tst_parallel3.nc"

/* 2-, 3-, and 4-dimensional tests; the first dimension is the
   unlimited time dimension. */

#define NDIMS1 2
#define NDIMS2 4
#define DIMSIZE /*4 */ 768*2
#define DIMSIZE2 4
#define DIMSIZE3 4
#define TIMELEN 1

/* BIGFILE: >2G, >4G, >8G files.
   The big file is created, but no actual data is written to it.
   The dimension sizes are defined inside the function. */

#define ATTRNAME1 "valid_range"
#define ATTRNAME2 "scale_factor"
#define ATTRNAME3 "title"

/* The number of processors should divide the dimension sizes evenly;
   a power of two is best. However, for the netCDF-4 tests the
   following processor counts are generally acceptable:
   1, 2, 3, 4, 6, 8, 12, 16

   The maximum number of processors is 16. */

int test_pio(int);
int test_pio_attr(int);
int test_pio_big(int);
int test_pio_hyper(int);
int test_pio_extend(int);

char* getenv_all(MPI_Comm comm, int root, const char* name);
int facc_type;
int facc_type_open;
char file_name[NC_MAX_NAME + 1];

int main(int argc, char **argv)
{
   int mpi_size, mpi_rank; /* mpi variables */
   int i;
   int NUMP[8] = {1, 2, 3, 4, 6, 8, 12, 16};
   int size_flag = 0;

   /* Unbuffer stdout and stderr. */
   setbuf(stderr, NULL);
   setbuf(stdout, NULL);

   MPI_Init(&argc, &argv);
   MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
   MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

   if (mpi_rank == 0)
      printf("\n*** Testing more advanced parallel access.\n");

   for (i = 0; i < 8; i++){
      if (mpi_size == NUMP[i])
      {
         size_flag = 1;
         break;
      }
   }
   if (!size_flag){
      printf("mpi_size is wrong\n");
      printf(" The number of processors must be chosen from\n");
      printf(" 1,2,3,4,6,8,12,16 \n");
      return -1;
   }

   facc_type = NC_NETCDF4;
   facc_type_open = 0;

   /* Create file name. */
   sprintf(file_name, "%s/%s", TEMP_LARGE, FILE_NAME);

   /* Test NetCDF4 with MPI-IO driver */
   if (mpi_rank == 0)
      printf("*** Testing parallel IO for raw-data with MPI-IO (driver)...");
   if (test_pio(NC_INDEPENDENT) != 0) ERR;
   if (test_pio(NC_COLLECTIVE) != 0) ERR;
   if (mpi_rank == 0)
      SUMMARIZE_ERR;

   if (mpi_rank == 0)
      printf("*** Testing parallel IO for meta-data with MPI-IO (driver)...");
   if (test_pio_attr(NC_INDEPENDENT) != 0) ERR;
   if (test_pio_attr(NC_COLLECTIVE) != 0) ERR;
   if (mpi_rank == 0)
      SUMMARIZE_ERR;

   if (mpi_rank == 0)
      printf("*** Testing parallel IO for different hyperslab selections with MPI-IO (driver)...");
   if (test_pio_hyper(NC_INDEPENDENT) != 0) ERR;
   if (test_pio_hyper(NC_COLLECTIVE) != 0) ERR;
   if (mpi_rank == 0)
      SUMMARIZE_ERR;

   if (mpi_rank == 0)
      printf("*** Testing parallel IO for extending variables with MPI-IO (driver)...");
   if (test_pio_extend(NC_COLLECTIVE) != 0) ERR;
   if (mpi_rank == 0)
      SUMMARIZE_ERR;

   if (mpi_rank == 0)
      printf("*** Testing parallel IO for raw-data with MPIPOSIX-IO (driver)...");
   facc_type = NC_NETCDF4;
   facc_type_open = 0;
   if (test_pio(NC_INDEPENDENT) != 0) ERR;
   if (test_pio(NC_COLLECTIVE) != 0) ERR;
   if (mpi_rank == 0)
      SUMMARIZE_ERR;

   if (mpi_rank == 0)
      printf("*** Testing parallel IO for meta-data with MPIPOSIX-IO (driver)...");
   if (test_pio_attr(NC_INDEPENDENT) != 0) ERR;
   if (test_pio_attr(NC_COLLECTIVE) != 0) ERR;
   if (mpi_rank == 0)
      SUMMARIZE_ERR;

   if (mpi_rank == 0)
      printf("*** Testing parallel IO for different hyperslab selections "
             "with MPIPOSIX-IO (driver)...");
   if (test_pio_hyper(NC_INDEPENDENT) != 0) ERR;
   if (test_pio_hyper(NC_COLLECTIVE) != 0) ERR;
   if (mpi_rank == 0)
      SUMMARIZE_ERR;

   if (mpi_rank == 0)
      printf("*** Testing parallel IO for extending variables with MPIPOSIX-IO (driver)...");
   if (test_pio_extend(NC_COLLECTIVE) != 0) ERR;
   if (mpi_rank == 0)
      SUMMARIZE_ERR;

   /* if(!getenv_all(MPI_COMM_WORLD,0,"NETCDF4_NOCLEANUP")) */
   remove(file_name);
   MPI_Finalize();

   if (mpi_rank == 0)
      FINAL_RESULTS;
   return 0;
}

/* Both read and write will be tested. */
int test_pio(int flag)
{
   /* MPI stuff. */
   int mpi_size, mpi_rank;
   MPI_Comm comm = MPI_COMM_WORLD;
   MPI_Info info = MPI_INFO_NULL;

   /* Netcdf-4 stuff. */
   int ncid;
   int nvid, uvid;
   int rvid;
   unsigned m, k, j, i;

   /* Two dimensional integer data test. */
   int dimids[NDIMS1];
   size_t start[NDIMS1];
   size_t count[NDIMS1];

   int *data;
   int *tempdata;
   int *rdata;
   int *temprdata;

   /* Four dimensional integer data test;
      the time dimension is unlimited. */
   int dimuids[NDIMS2];
   size_t ustart[NDIMS2];
   size_t ucount[NDIMS2];

   int *udata;
   int *tempudata;
   int *rudata;
   int *temprudata;

   /* Find out this process's rank and the size of the communicator
      (MPI was already initialized in main). */
   MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
   MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

   /* Create a parallel netcdf-4 file. */
   if (nc_create_par(file_name, facc_type, comm, info, &ncid)) ERR;
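   /* (nc_create_par() is a collective call: every process in comm must
      call it with the same arguments. With NC_NETCDF4 in the mode
      flags, the file is accessed through HDF5's parallel MPI-IO
      layer.) */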

   /* The first case is two dimensional variables, no unlimited dimension. */

   /* Create two dimensions. */
   if (nc_def_dim(ncid, "d1", DIMSIZE2, dimids)) ERR;
   if (nc_def_dim(ncid, "d2", DIMSIZE, &dimids[1])) ERR;

   /* Create one var. */
   if (nc_def_var(ncid, "v1", NC_INT, NDIMS1, dimids, &nvid)) ERR;

   if (nc_enddef(ncid)) ERR;

   /* Set up slab for this process. */
   start[0] = 0;
   start[1] = mpi_rank * DIMSIZE/mpi_size;
   count[0] = DIMSIZE2;
   count[1] = DIMSIZE/mpi_size;
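   /* (For example, with mpi_size = 4 and DIMSIZE = 768*2 = 1536, each
      process writes a 4 x 384 slab; rank 1 covers columns 384..767.
      mpi_size must divide DIMSIZE evenly for the slabs to tile the
      variable exactly.) */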

   /* Choose independent or collective parallel access for this variable. */
   if (nc_var_par_access(ncid, nvid, flag)) ERR;
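   /* (If nc_var_par_access() is never called, netCDF-4 defaults to
      independent access for a variable.) */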
216
217 if (!(data = malloc(sizeof(int)*count[1]*count[0]))) ERR;
218 tempdata = data;
219 for (j = 0; j < count[0]; j++){
220 for (i = 0; i < count[1]; i++)
221 {
222 *tempdata = mpi_rank * (j + 1);
223 tempdata++;
224 }
225 }
226
227 /* Write two dimensional integer data */
228 if (nc_put_vara_int(ncid, nvid, start, count, data)) ERR;
229 free(data);
230
231 /* Case 2: create four dimensional integer data,
232 one dimension is unlimited. */
233
234 /* Create four dimensions. */
235 if (nc_def_dim(ncid, "ud1", NC_UNLIMITED, dimuids)) ERR;
236 if (nc_def_dim(ncid, "ud2", DIMSIZE3, &dimuids[1])) ERR;
237 if (nc_def_dim(ncid, "ud3", DIMSIZE2, &dimuids[2])) ERR;
238 if (nc_def_dim(ncid, "ud4", DIMSIZE, &dimuids[3])) ERR;
239
240 /* Create one var. */
241 if (nc_def_var(ncid, "uv1", NC_INT, NDIMS2, dimuids, &uvid)) ERR;
242
243 if (nc_enddef(ncid)) ERR;
244
245 /* Set up selection parameters */
246 ustart[0] = 0;
247 ustart[1] = 0;
248 ustart[2] = 0;
249 ustart[3] = DIMSIZE*mpi_rank/mpi_size;
250 ucount[0] = TIMELEN;
251 ucount[1] = DIMSIZE3;
252 ucount[2] = DIMSIZE2;
253 ucount[3] = DIMSIZE/mpi_size;
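   /* (Each process writes TIMELEN records of a DIMSIZE3 x DIMSIZE2 x
      (DIMSIZE/mpi_size) block, partitioned along the last,
      fastest-varying dimension.) */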

   /* Set the parallel access mode. */
   if (nc_var_par_access(ncid, uvid, flag)) ERR;

   /* Create phony data. */
   if (!(udata = malloc(ucount[0]*ucount[1]*ucount[2]*ucount[3]*sizeof(int)))) ERR;
   tempudata = udata;
   for (m = 0; m < ucount[0]; m++)
      for (k = 0; k < ucount[1]; k++)
         for (j = 0; j < ucount[2]; j++)
            for (i = 0; i < ucount[3]; i++)
            {
               *tempudata = (1 + mpi_rank) * 2 * (j + 1) * (k + 1) * (m + 1);
               tempudata++;
            }

   /* Write slabs of phony data. */
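   /* (Writing these slabs extends the unlimited dimension from 0 to
      TIMELEN records. Extending an unlimited dimension requires
      collective access, because all processes must agree on the new
      dimension size, so the independent write below must fail with
      NC_ECANTEXTEND.) */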
   if (NC_INDEPENDENT == flag) {
      int res;
      res = nc_put_vara_int(ncid, uvid, ustart, ucount, udata);
      if (res != NC_ECANTEXTEND) ERR;
   }
   else {
      if (nc_put_vara_int(ncid, uvid, ustart, ucount, udata)) ERR;
   }
   free(udata);

   /* Close the netcdf file. */
   if (nc_close(ncid)) ERR;

   if (nc_open_par(file_name, facc_type_open, comm, info, &ncid)) ERR;
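   /* (The file is reopened for parallel reading; nc_open_par() is also
      collective. facc_type_open is 0 here, so the format is detected
      from the file itself.) */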

   /* Case 1: read two-dimensional variables, no unlimited dimension. */
   /* Set up slab for this process. */
   start[0] = 0;
   start[1] = mpi_rank * DIMSIZE/mpi_size;
   count[0] = DIMSIZE2;
   count[1] = DIMSIZE/mpi_size;

   if (nc_inq_varid(ncid, "v1", &rvid)) ERR;

   if (nc_var_par_access(ncid, rvid, flag)) ERR;

   if (!(rdata = malloc(sizeof(int) * count[1] * count[0]))) ERR;
   if (nc_get_vara_int(ncid, rvid, start, count, rdata)) ERR;

   temprdata = rdata;
   for (j = 0; j < count[0]; j++){
      for (i = 0; i < count[1]; i++){
         if (*temprdata != mpi_rank * (j + 1))
         {
            ERR_RET;
            break;
         }
         temprdata++;
      }
   }
   free(rdata);

   /* Case 2: read four dimensional data; one dimension is unlimited. */

   /* Set up the selection parameters. */
   ustart[0] = 0;
   ustart[1] = 0;
   ustart[2] = 0;
   ustart[3] = DIMSIZE*mpi_rank/mpi_size;
   ucount[0] = TIMELEN;
   ucount[1] = DIMSIZE3;
   ucount[2] = DIMSIZE2;
   ucount[3] = DIMSIZE/mpi_size;

   /* Look up the variable and read it back.
      (NOTE: This variable isn't written when access is independent.) */
   if (NC_INDEPENDENT != flag) {
      if (nc_inq_varid(ncid, "uv1", &rvid)) ERR;

      /* Set the parallel access mode. */
      if (nc_var_par_access(ncid, rvid, flag)) ERR;

      if (!(rudata = malloc(ucount[0]*ucount[1]*ucount[2]*ucount[3]*sizeof(int)))) ERR;
      temprudata = rudata;

      /* Read the data. */
      if (nc_get_vara_int(ncid, rvid, ustart, ucount, rudata)) ERR;

      for (m = 0; m < ucount[0]; m++)
         for (k = 0; k < ucount[1]; k++)
            for (j = 0; j < ucount[2]; j++)
               for (i = 0; i < ucount[3]; i++)
               {
                  if (*temprudata != (1 + mpi_rank) * 2 * (j + 1) * (k + 1) * (m + 1))
                     ERR_RET;
                  temprudata++;
               }

      free(rudata);
   }

   /* Close the netcdf file. */
   if (nc_close(ncid)) ERR;

   return 0;
}


/* Attributes: both read and write will be tested for parallel netCDF. */

int test_pio_attr(int flag)
{
   /* MPI stuff. */
   int mpi_size, mpi_rank;
   MPI_Comm comm = MPI_COMM_WORLD;
   MPI_Info info = MPI_INFO_NULL;

   /* Netcdf-4 stuff. */
   int ncid;
   int nvid;
   int j, i;

   double rh_range[2];
   static char title[] = "parallel attr to netCDF";
   nc_type st_type, vr_type;
   size_t vr_len, st_len;
   size_t orivr_len;
   double *vr_val;
   char *st_val;

   /* Two dimensional integer data. */
   int dimids[NDIMS1];
   size_t start[NDIMS1];
   size_t count[NDIMS1];
   int *data;
   int *tempdata;

   /* Find out this process's rank and the size of the communicator. */
   MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
   MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

   /* Create a parallel netcdf-4 file. */
   /* nc_set_log_level(NC_TURN_OFF_LOGGING); */
   /* nc_set_log_level(3); */

   if (nc_create_par(file_name, facc_type, comm, info, &ncid)) ERR;

   /* Create a 2-D variable so that an attribute can be added. */
   if (nc_def_dim(ncid, "d1", DIMSIZE2, dimids)) ERR;
   if (nc_def_dim(ncid, "d2", DIMSIZE, &dimids[1])) ERR;

   /* Create one var. */
   if (nc_def_var(ncid, "v1", NC_INT, NDIMS1, dimids, &nvid)) ERR;

   orivr_len = 2;
   rh_range[0] = 1.0;
   rh_range[1] = 1000.0;

   /* Write attributes of a variable. */

   if (nc_put_att_double(ncid, nvid, "valid_range", NC_DOUBLE,
                         orivr_len, rh_range)) ERR;

   if (nc_put_att_text(ncid, nvid, "title", strlen(title),
                       title)) ERR;

   /* Write global attributes. */
   if (nc_put_att_double(ncid, NC_GLOBAL, "g_valid_range", NC_DOUBLE,
                         orivr_len, rh_range)) ERR;
   if (nc_put_att_text(ncid, NC_GLOBAL, "g_title", strlen(title), title)) ERR;
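   /* (In parallel netCDF-4, define-mode operations such as nc_def_dim(),
      nc_def_var(), and nc_put_att_*() are collective: every process
      must make the same calls in the same order.) */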

   if (nc_enddef(ncid)) ERR;

   /* Set up slab for this process. */
   start[0] = 0;
   start[1] = mpi_rank * DIMSIZE/mpi_size;
   count[0] = DIMSIZE2;
   count[1] = DIMSIZE/mpi_size;

   /* Set the parallel access mode. */
   if (nc_var_par_access(ncid, nvid, flag)) ERR;

   /* Allocate and initialize the data. */
   if (!(data = malloc(sizeof(int) * count[1] * count[0]))) ERR;
   tempdata = data;
   for (j = 0; j < count[0]; j++)
      for (i = 0; i < count[1]; i++)
      {
         *tempdata = mpi_rank * (j + 1);
         tempdata++;
      }

   if (nc_put_vara_int(ncid, nvid, start, count, data)) ERR;
   free(data);

   /* Close the netcdf file. */
   if (nc_close(ncid)) ERR;

   /* Read the attributes. */
   if (nc_open_par(file_name, facc_type_open, comm, info, &ncid)) ERR;

   /* Set up slab for this process. */
   start[0] = 0;
   start[1] = mpi_rank * DIMSIZE/mpi_size;
   count[0] = DIMSIZE2;
   count[1] = DIMSIZE/mpi_size;

   /* Look up the variable. */
   if (nc_inq_varid(ncid, "v1", &nvid)) ERR;

   /* Set the parallel access mode. */
   if (nc_var_par_access(ncid, nvid, flag)) ERR;

   /* Inquire about the attribute. */
   if (nc_inq_att(ncid, nvid, "valid_range", &vr_type, &vr_len)) ERR;

   /* Check the type and length. */
   if (vr_type != NC_DOUBLE || vr_len != orivr_len) ERR;

   if (!(vr_val = malloc(vr_len * sizeof(double)))) ERR;

   /* Get the variable attribute values. */
   if (nc_get_att_double(ncid, nvid, "valid_range", vr_val)) ERR;

   /* Check the variable attribute values. */
   for (i = 0; i < vr_len; i++)
      if (vr_val[i] != rh_range[i])
         ERR_RET;
   free(vr_val);

   /* Inquire about the global attribute. */
   if (nc_inq_att(ncid, NC_GLOBAL, "g_valid_range", &vr_type, &vr_len)) ERR;

   /* Check the type and length. */
   if (vr_type != NC_DOUBLE || vr_len != orivr_len) ERR;

   /* Obtain the global attribute values. */
   if (!(vr_val = malloc(vr_len * sizeof(double)))) ERR;
   if (nc_get_att_double(ncid, NC_GLOBAL, "g_valid_range", vr_val)) ERR;

   /* Check the global attribute values. */
   for (i = 0; i < vr_len; i++)
      if (vr_val[i] != rh_range[i]) ERR_RET;
   free(vr_val);

   /* Inquire about the string attribute of the variable. */
   if (nc_inq_att(ncid, nvid, "title", &st_type, &st_len)) ERR;

   /* Check the string attribute length. */
   if (st_len != strlen(title)) ERR_RET;

   /* Check the string attribute type. */
   if (st_type != NC_CHAR) ERR_RET;

   /* Allocate memory for the string attribute. */
   if (!(st_val = malloc(st_len * sizeof(char)))) ERR;

   /* Obtain the variable string attribute value. */
   if (nc_get_att_text(ncid, nvid, "title", st_val)) ERR;

   /* Check the string value. */
   if (strncmp(st_val, title, st_len)) {
      free(st_val);
      ERR_RET;
   }
   free(st_val);
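   /* (NC_CHAR attributes are not null-terminated, which is why the
      buffer is exactly st_len bytes and the comparison uses strncmp()
      with st_len.) */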

   /* Inquire about the global string attribute. */
   if (nc_inq_att(ncid, NC_GLOBAL, "g_title", &st_type, &st_len)) ERR;

   /* Check the attribute length. */
   if (st_len != strlen(title)) ERR_RET;

   /* Check the attribute type. */
   if (st_type != NC_CHAR) ERR_RET;

   /* Obtain the global string attribute value. */
   if (!(st_val = malloc(st_len * sizeof(char)))) ERR;
   if (nc_get_att_text(ncid, NC_GLOBAL, "g_title", st_val)) ERR;

   /* Check the attribute value. */
   if (strncmp(st_val, title, st_len)){
      free(st_val);
      ERR_RET;
   }
   free(st_val);

   /* Close the netcdf file. */
   if (nc_close(ncid)) ERR;

   return 0;
}

/* Test different hyperslab settings. */
int test_pio_hyper(int flag)
{
   /* MPI stuff. */
   int mpi_size, mpi_rank;
   int res = NC_NOERR;
   MPI_Comm comm = MPI_COMM_WORLD;
   MPI_Info info = MPI_INFO_NULL;

   /* Netcdf-4 stuff. */
   int ncid;
   int nvid;
   int rvid;
   int j, i;

   /* Two dimensional integer data test. */
   int dimids[NDIMS1];
   size_t start[NDIMS1], count[NDIMS1];
   int *data;
   int *tempdata;
   int *rdata;
   int *temprdata;
   int count_atom;


   /* Find out this process's rank and the size of the communicator. */
   MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
   MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

   /* With a single process there is nothing to split; skip this test. */
   if (mpi_size == 1) return 0;

   /* Create a parallel netcdf-4 file. */
   /* nc_set_log_level(NC_TURN_OFF_LOGGING); */
   /* nc_set_log_level(4); */

   if (nc_create_par(file_name, facc_type, comm, info, &ncid)) ERR;

   /* The case is two dimensional variables, no unlimited dimension. */

   /* Create two dimensions. */
   if (nc_def_dim(ncid, "d1", DIMSIZE2, dimids)) ERR;
   if (nc_def_dim(ncid, "d2", DIMSIZE, &dimids[1])) ERR;

   /* Create one var. */
   if (nc_def_var(ncid, "v1", NC_INT, NDIMS1, dimids, &nvid)) ERR;

   if (nc_enddef(ncid)) ERR;

   /* Hyperslab illustration for the 3-processor case:

      ----------
      |aaaacccc|
      |aaaacccc|
      |bbbb    |
      |bbbb    |
      ----------
   */
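   /* (With an odd number of processes, ranks 0..mpi_size/2 tile the top
      half of the rows and the remaining ranks tile the bottom half, so
      part of the bottom half, the blank region above, is never written.
      With an even count, both halves are tiled completely.) */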

   /* An odd number of processors is treated differently. */
   if (mpi_size % 2 != 0) {

      count_atom = DIMSIZE*2/(mpi_size + 1);
      if (mpi_rank <= mpi_size/2) {
         start[0] = 0;
         start[1] = mpi_rank*count_atom;
         count[0] = DIMSIZE2/2;
         count[1] = count_atom;
      }
      else {
         start[0] = DIMSIZE2/2;
         start[1] = (mpi_rank - mpi_size/2 - 1)*count_atom;
         count[0] = DIMSIZE2/2;
         count[1] = count_atom;
      }
   }
   else {

      count_atom = DIMSIZE*2/mpi_size;
      if (mpi_rank < mpi_size/2) {
         start[0] = 0;
         start[1] = mpi_rank*count_atom;
         count[0] = DIMSIZE2/2;
         count[1] = count_atom;
      }
      else {
         start[0] = DIMSIZE2/2;
         start[1] = (mpi_rank - mpi_size/2)*count_atom;
         count[0] = DIMSIZE2/2;
         count[1] = count_atom;
      }
   }

   if (nc_var_par_access(ncid, nvid, flag)) ERR;
   if (!(data = malloc(sizeof(int) * count[1] * count[0]))) ERR;
   tempdata = data;
   for (j = 0; j < count[0]; j++){
      for (i = 0; i < count[1]; i++){
         *tempdata = mpi_rank * (j + 1);
         tempdata++;
      }
   }

   if (nc_put_vara_int(ncid, nvid, start, count, data)) ERR;
   free(data);

   /* Close the netcdf file. */
   if (nc_close(ncid)) ERR;

   if (nc_open_par(file_name, facc_type_open, comm, info, &ncid)) ERR;

   /* Look up the variable. */
   if (nc_inq_varid(ncid, "v1", &rvid)) ERR;

   if (nc_var_par_access(ncid, rvid, flag)) ERR;

   if (!(rdata = malloc(sizeof(int) * count[1] * count[0]))) ERR;
   /* Read the data with the same slab settings. */
   if (nc_get_vara_int(ncid, rvid, start, count, rdata)) ERR;

   temprdata = rdata;
   for (j = 0; j < count[0]; j++){
      for (i = 0; i < count[1]; i++){
         if (*temprdata != mpi_rank * (j + 1))
         {
            res = -1;
            break;
         }
         temprdata++;
      }
   }

   free(rdata);
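   /* (A mismatch is recorded in res rather than returned immediately,
      so that rdata is freed even when the verification fails.) */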
   if (res == -1) ERR_RET;

   /* Close the netcdf file. */
   if (nc_close(ncid)) ERR;

   return 0;
}

/* Test extending variables. */
int test_pio_extend(int flag)
{
   int rank, procs;
   int ncFile;
   int ncDimPart;
   int ncDimVrtx;
   int ncVarVrtx;
   int dimsVrtx[2];
   size_t start[2];
   size_t count[2];
   int vertices[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
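   /* (Each rank writes its first `rank` values from this array below,
      so 16 entries are enough for the maximum of 16 processes.) */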

   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
   MPI_Comm_size(MPI_COMM_WORLD, &procs);

   /* Create the netcdf file. */
   if (nc_create_par("test.nc", NC_NETCDF4, MPI_COMM_WORLD, MPI_INFO_NULL, &ncFile)) ERR;

   /* Create the netcdf dimensions. */
   if (nc_def_dim(ncFile, "partitions", procs, &ncDimPart)) ERR;
   if (nc_def_dim(ncFile, "vertices", NC_UNLIMITED, &ncDimVrtx)) ERR;

   /* Create the netcdf variable. */
   dimsVrtx[0] = ncDimPart;
   dimsVrtx[1] = ncDimVrtx;
   if (nc_def_var(ncFile, "vertex", NC_INT, 2, dimsVrtx, &ncVarVrtx)) ERR;

   /* Start writing data. */
   if (nc_enddef(ncFile)) ERR;

   /* Set the access mode. */
   if (nc_var_par_access(ncFile, ncVarVrtx, flag)) ERR;

   /* Write the vertices. */
   start[0] = rank;
   start[1] = 0;
   count[0] = 1;
   count[1] = rank;
   if (nc_put_vara_int(ncFile, ncVarVrtx, start, count, vertices)) ERR;
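   /* (Each process writes a different number of values: count[1] == rank,
      so rank 0 writes none. The unlimited "vertices" dimension is
      extended to the largest count written, which is why this test is
      run only with collective access.) */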

   /* Close the netcdf file. */
   if (nc_close(ncFile)) ERR;

   return 0;
}

/*-------------------------------------------------------------------------
 * Function:    getenv_all
 *
 * Purpose:     Used to get the environment that the root MPI task has.
 *              name specifies which environment variable to look for.
 *
 *              NOTE: The pointer returned by this function is only
 *              valid until the next call to getenv_all, and the data
 *              stored there must be copied somewhere else before any
 *              further calls to getenv_all take place.
 *
 * Return:      Pointer to a string containing the value of the
 *              environment variable; NULL if the variable doesn't
 *              exist in task 'root's environment.
 *
 * Programmer:  Leon Arber
 *              4/4/05
 *
 *-------------------------------------------------------------------------
 */

char* getenv_all(MPI_Comm comm, int root, const char* name)
{
   int nID;
   int len = -1;
   static char* env = NULL;

   assert(name);

   MPI_Comm_rank(comm, &nID);

   /* The root task does the getenv call
    * and sends the result to the other tasks. */
   if (nID == root)
   {
      env = getenv(name);
      if (env)
      {
         len = strlen(env);
         MPI_Bcast(&len, 1, MPI_INT, root, comm);
         MPI_Bcast(env, len, MPI_CHAR, root, comm);
      }
      /* A len of -1 indicates that the variable was not in the environment. */
      else
         MPI_Bcast(&len, 1, MPI_INT, root, comm);
   }
   else
   {
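      /* Non-root tasks receive the length first (-1 means the variable
       * is not set on root), then the value itself. */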
      MPI_Bcast(&len, 1, MPI_INT, root, comm);
      if (len >= 0)
      {
         if (env == NULL)
            env = (char*) malloc(len + 1);
         else if (strlen(env) < len)
            env = (char*) realloc(env, len + 1);

         MPI_Bcast(env, len, MPI_CHAR, root, comm);
         env[len] = '\0';
      }
      else
      {
         if (env)
            free(env);
         env = NULL;
      }
   }

   MPI_Barrier(comm);

   return env;
}