1
2 #include "hdf5.h"
3 #include "testphdf5.h"
4 #include "H5Dprivate.h" /* For Chunk tests */
5
6 /* FILENAME and filenames must have the same number of names */
7 const char *FILENAME[2]={ "bigio_test.h5",
8 NULL
9 };
10
11 /* Constants definitions */
12 #define MAX_ERR_REPORT 10 /* Maximum number of errors reported */
13
14 /* Define some handy debugging shorthands, routines, ... */
15 /* debugging tools */
16
17 #define MAINPROCESS (!mpi_rank) /* define process 0 as main process */
18
19 /* Constants definitions */
20 #define RANK 2
21
22 #define IN_ORDER 1
23 #define OUT_OF_ORDER 2
24
25 #define DATASET1 "DSET1"
26 #define DATASET2 "DSET2"
27 #define DATASET3 "DSET3"
28 #define DATASET4 "DSET4"
29 #define DATASET5 "DSET5"
30 #define DXFER_COLLECTIVE_IO 0x1 /* Collective IO*/
31 #define DXFER_INDEPENDENT_IO 0x2 /* Independent IO collectively */
32 #define DXFER_BIGCOUNT 536870916
33
34 #define HYPER 1
35 #define POINT 2
36 #define ALL 3
37
38 /* Dataset data type. Ints can be easily octal-dumped. */
39 typedef hsize_t B_DATATYPE;
40
41 int facc_type = FACC_MPIO; /*Test file access type */
42 int dxfer_coll_type = DXFER_COLLECTIVE_IO;
43 size_t bigcount = DXFER_BIGCOUNT;
44 int nerrors = 0;
45 int mpi_size, mpi_rank;
46
47 hsize_t space_dim1 = SPACE_DIM1 * 256; // 4096
48 hsize_t space_dim2 = SPACE_DIM2;
49
50 static void coll_chunktest(const char* filename, int chunk_factor, int select_factor,
51 int api_option, int file_selection, int mem_selection, int mode);
52 hid_t create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type);
53
54 /*
55 * Setup the coordinates for point selection.
56 */
57 static void
set_coords(hsize_t start[],hsize_t count[],hsize_t stride[],hsize_t block[],size_t num_points,hsize_t coords[],int order)58 set_coords(hsize_t start[],
59 hsize_t count[],
60 hsize_t stride[],
61 hsize_t block[],
62 size_t num_points,
63 hsize_t coords[],
64 int order)
65 {
66 hsize_t i,j, k = 0, m ,n, s1 ,s2;
67
68 if(OUT_OF_ORDER == order)
69 k = (num_points * RANK) - 1;
70 else if(IN_ORDER == order)
71 k = 0;
72
73 s1 = start[0];
74 s2 = start[1];
75
76 for(i = 0 ; i < count[0]; i++)
77 for(j = 0 ; j < count[1]; j++)
78 for(m = 0 ; m < block[0]; m++)
79 for(n = 0 ; n < block[1]; n++)
80 if(OUT_OF_ORDER == order) {
81 coords[k--] = s2 + (stride[1] * j) + n;
82 coords[k--] = s1 + (stride[0] * i) + m;
83 }
84 else if(IN_ORDER == order) {
85 coords[k++] = s1 + stride[0] * i + m;
86 coords[k++] = s2 + stride[1] * j + n;
87 }
88 }
89
90 /*
91 * Fill the dataset with trivial data for testing.
92 * Assume dimension rank is 2 and data is stored contiguous.
93 */
94 static void
fill_datasets(hsize_t start[],hsize_t block[],B_DATATYPE * dataset)95 fill_datasets(hsize_t start[], hsize_t block[], B_DATATYPE * dataset)
96 {
97 B_DATATYPE *dataptr = dataset;
98 hsize_t i, j;
99
100 /* put some trivial data in the data_array */
101 for (i=0; i < block[0]; i++){
102 for (j=0; j < block[1]; j++){
103 *dataptr = (B_DATATYPE)((i+start[0])*100 + (j+start[1]+1));
104 dataptr++;
105 }
106 }
107 }
108
109 /*
110 * Setup the coordinates for point selection.
111 */
point_set(hsize_t start[],hsize_t count[],hsize_t stride[],hsize_t block[],size_t num_points,hsize_t coords[],int order)112 void point_set(hsize_t start[],
113 hsize_t count[],
114 hsize_t stride[],
115 hsize_t block[],
116 size_t num_points,
117 hsize_t coords[],
118 int order)
119 {
120 hsize_t i,j, k = 0, m ,n, s1 ,s2;
121
122 HDcompile_assert(RANK == 2);
123
124 if(OUT_OF_ORDER == order)
125 k = (num_points * RANK) - 1;
126 else if(IN_ORDER == order)
127 k = 0;
128
129 s1 = start[0];
130 s2 = start[1];
131
132 for(i = 0 ; i < count[0]; i++)
133 for(j = 0 ; j < count[1]; j++)
134 for(m = 0 ; m < block[0]; m++)
135 for(n = 0 ; n < block[1]; n++)
136 if(OUT_OF_ORDER == order) {
137 coords[k--] = s2 + (stride[1] * j) + n;
138 coords[k--] = s1 + (stride[0] * i) + m;
139 }
140 else if(IN_ORDER == order) {
141 coords[k++] = s1 + stride[0] * i + m;
142 coords[k++] = s2 + stride[1] * j + n;
143 }
144
145 if(VERBOSE_MED) {
146 HDprintf("start[]=(%lu, %lu), count[]=(%lu, %lu), stride[]=(%lu, %lu), block[]=(%lu, %lu), total datapoints=%lu\n",
147 (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
148 (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1],
149 (unsigned long)(block[0] * block[1] * count[0] * count[1]));
150 k = 0;
151 for(i = 0; i < num_points ; i++) {
152 HDprintf("(%d, %d)\n", (int)coords[k], (int)coords[k + 1]);
153 k += 2;
154 }
155 }
156 }
157
158 /*
159 * Print the content of the dataset.
160 */
161 static void
dataset_print(hsize_t start[],hsize_t block[],B_DATATYPE * dataset)162 dataset_print(hsize_t start[], hsize_t block[], B_DATATYPE * dataset)
163 {
164 B_DATATYPE *dataptr = dataset;
165 hsize_t i, j;
166
167 /* print the column heading */
168 HDprintf("%-8s", "Cols:");
169 for (j=0; j < block[1]; j++){
170 HDprintf("%3lu ", (unsigned long)(start[1]+j));
171 }
172 HDprintf("\n");
173
174 /* print the slab data */
175 for (i=0; i < block[0]; i++){
176 HDprintf("Row %2lu: ", (unsigned long)(i+start[0]));
177 for (j=0; j < block[1]; j++){
178 HDprintf("%llu ", *dataptr++);
179 }
180 HDprintf("\n");
181 }
182 }
183
184
185 /*
186 * Print the content of the dataset.
187 */
188 static int
verify_data(hsize_t start[],hsize_t count[],hsize_t stride[],hsize_t block[],B_DATATYPE * dataset,B_DATATYPE * original)189 verify_data(hsize_t start[], hsize_t count[], hsize_t stride[], hsize_t block[], B_DATATYPE *dataset, B_DATATYPE *original)
190 {
191 hsize_t i, j;
192 int vrfyerrs;
193
194 /* print it if VERBOSE_MED */
195 if(VERBOSE_MED) {
196 HDprintf("verify_data dumping:::\n");
197 HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
198 (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
199 (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]);
200 HDprintf("original values:\n");
201 dataset_print(start, block, original);
202 HDprintf("compared values:\n");
203 dataset_print(start, block, dataset);
204 }
205
206 vrfyerrs = 0;
207 for (i=0; i < block[0]; i++){
208 for (j=0; j < block[1]; j++){
209 if(*dataset != *original){
210 if(vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){
211 HDprintf("Dataset Verify failed at [%lu][%lu](row %lu, col %lu): expect %llu, got %llu\n",
212 (unsigned long)i, (unsigned long)j,
213 (unsigned long)(i+start[0]), (unsigned long)(j+start[1]),
214 *(original), *(dataset));
215 }
216 dataset++;
217 original++;
218 }
219 }
220 }
221 if(vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
222 HDprintf("[more errors ...]\n");
223 if(vrfyerrs)
224 HDprintf("%d errors found in verify_data\n", vrfyerrs);
225 return(vrfyerrs);
226 }
227
/*
 * Set up this process's hyperslab selection (start/count/stride/block)
 * over a dataset whose rows are partitioned across mpi_size processes.
 * The decomposition pattern is chosen by `mode` (one of the BYROW_*
 * constants); an unknown mode falls back to selecting the whole dataset.
 * Dumps the resulting selection when VERBOSE_MED is enabled.
 */
static void
ccslab_set(int mpi_rank,
           int mpi_size,
           hsize_t start[],
           hsize_t count[],
           hsize_t stride[],
           hsize_t block[],
           int mode)
{

    switch (mode){

        case BYROW_CONT:
            /* Each process takes a slabs of rows: space_dim1 x space_dim2
             * single-element blocks, contiguous (stride 1), offset by rank. */
            block[0]  = 1;
            block[1]  = 1;
            stride[0] = 1;
            stride[1] = 1;
            count[0]  = space_dim1;
            count[1]  = space_dim2;
            start[0]  = mpi_rank*count[0];
            start[1]  = 0;

            break;

        case BYROW_DISCONT:
            /* Each process takes several disjoint blocks: stride 3 leaves
             * gaps between the selected single-element blocks. */
            block[0]  = 1;
            block[1]  = 1;
            stride[0] = 3;
            stride[1] = 3;
            count[0]  = space_dim1/(stride[0]*block[0]);
            count[1]  = (space_dim2)/(stride[1]*block[1]);
            start[0]  = space_dim1*mpi_rank;
            start[1]  = 0;

            break;

        case BYROW_SELECTNONE:
            /* Each process takes a slabs of rows; the last one or two
             * processes get an empty selection (count[0] == 0). */
            block[0]  = 1;
            block[1]  = 1;
            stride[0] = 1;
            stride[1] = 1;
            count[0]  = ((mpi_rank >= MAX(1,(mpi_size-2)))?0:space_dim1);
            count[1]  = space_dim2;
            start[0]  = mpi_rank*count[0];
            start[1]  = 0;

            break;

        case BYROW_SELECTUNBALANCE:
            /* The first one-third of the number of processes only
               select top half of the domain, The rest will select the bottom
               half of the domain.  Each process picks two full-width rows
               spaced stride[0] apart. */

            block[0]  = 1;
            count[0]  = 2;
            stride[0] = space_dim1*mpi_size/4+1;
            block[1]  = space_dim2;
            count[1]  = 1;
            start[1]  = 0;
            stride[1] = 1;
            if((mpi_rank *3)<(mpi_size*2)) start[0] = mpi_rank;
            else start[0] = 1 + space_dim1*mpi_size/2 + (mpi_rank-2*mpi_size/3);
            break;

        case BYROW_SELECTINCHUNK:
            /* Each process will only select one chunk: a single full-width
             * row at its rank's chunk offset. */

            block[0] = 1;
            count[0] = 1;
            start[0] = mpi_rank*space_dim1;
            stride[0]= 1;
            block[1] = space_dim2;
            count[1] = 1;
            stride[1]= 1;
            start[1] = 0;

            break;

        default:
            /* Unknown mode. Set it to cover the whole dataset. */
            block[0]  = space_dim1*mpi_size;
            block[1]  = space_dim2;
            stride[0] = block[0];
            stride[1] = block[1];
            count[0]  = 1;
            count[1]  = 1;
            start[0]  = 0;
            start[1]  = 0;

            break;
    }
    if (VERBOSE_MED){
        HDprintf("start[]=(%lu,%lu), count[]=(%lu,%lu), stride[]=(%lu,%lu), block[]=(%lu,%lu), total datapoints=%lu\n",
            (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
            (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1],
            (unsigned long)(block[0]*block[1]*count[0]*count[1]));
    }
}
331
332
333 /*
334 * Fill the dataset with trivial data for testing.
335 * Assume dimension rank is 2.
336 */
337 static void
ccdataset_fill(hsize_t start[],hsize_t stride[],hsize_t count[],hsize_t block[],DATATYPE * dataset,int mem_selection)338 ccdataset_fill(hsize_t start[],
339 hsize_t stride[],
340 hsize_t count[],
341 hsize_t block[],
342 DATATYPE * dataset,
343 int mem_selection)
344 {
345 DATATYPE *dataptr = dataset;
346 DATATYPE *tmptr;
347 hsize_t i,j,k1,k2,k=0;
348 /* put some trivial data in the data_array */
349 tmptr = dataptr;
350
351 /* assign the disjoint block (two-dimensional)data array value
352 through the pointer */
353
354 for (k1 = 0; k1 < count[0]; k1++) {
355 for(i = 0; i < block[0]; i++) {
356 for(k2 = 0; k2 < count[1]; k2++) {
357 for(j = 0;j < block[1]; j++) {
358
359 if (ALL != mem_selection) {
360 dataptr = tmptr + ((start[0]+k1*stride[0]+i)*space_dim2+
361 start[1]+k2*stride[1]+j);
362 }
363 else {
364 dataptr = tmptr + k;
365 k++;
366 }
367
368 *dataptr = (DATATYPE)(k1+k2+i+j);
369 }
370 }
371 }
372 }
373 }
374
375 /*
376 * Print the first block of the content of the dataset.
377 */
378 static void
ccdataset_print(hsize_t start[],hsize_t block[],DATATYPE * dataset)379 ccdataset_print(hsize_t start[],
380 hsize_t block[],
381 DATATYPE * dataset)
382
383 {
384 DATATYPE *dataptr = dataset;
385 hsize_t i, j;
386
387 /* print the column heading */
388 HDprintf("Print only the first block of the dataset\n");
389 HDprintf("%-8s", "Cols:");
390 for (j=0; j < block[1]; j++){
391 HDprintf("%3lu ", (unsigned long)(start[1]+j));
392 }
393 HDprintf("\n");
394
395 /* print the slab data */
396 for (i=0; i < block[0]; i++){
397 HDprintf("Row %2lu: ", (unsigned long)(i+start[0]));
398 for (j=0; j < block[1]; j++){
399 HDprintf("%03d ", *dataptr++);
400 }
401 HDprintf("\n");
402 }
403 }
404
/*
 * Verify a read-back dataset buffer against the expected buffer using the
 * same addressing scheme as ccdataset_fill: sequential for an ALL memory
 * selection, hyperslab-mapped (row width space_dim2) otherwise.  At most
 * MAX_ERR_REPORT mismatches are printed (all of them under VERBOSE_MED).
 *
 * Returns the number of mismatched elements (0 on success).
 */
static int
ccdataset_vrfy(hsize_t start[],
               hsize_t count[],
               hsize_t stride[],
               hsize_t block[],
               DATATYPE *dataset,
               DATATYPE *original,
               int mem_selection)
{
    hsize_t i, j,k1,k2,k=0;     /* block (i,j) and count (k1,k2) cursors; k is the sequential index */
    int vrfyerrs;
    DATATYPE *dataptr,*oriptr;

    /* print it if VERBOSE_MED */
    if (VERBOSE_MED) {
        HDprintf("dataset_vrfy dumping:::\n");
        HDprintf("start(%lu, %lu), count(%lu, %lu), stride(%lu, %lu), block(%lu, %lu)\n",
            (unsigned long)start[0], (unsigned long)start[1], (unsigned long)count[0], (unsigned long)count[1],
            (unsigned long)stride[0], (unsigned long)stride[1], (unsigned long)block[0], (unsigned long)block[1]);
        HDprintf("original values:\n");
        ccdataset_print(start, block, original);
        HDprintf("compared values:\n");
        ccdataset_print(start, block, dataset);
    }

    vrfyerrs = 0;

    for (k1=0;k1<count[0];k1++) {
        for(i=0;i<block[0];i++) {
            for(k2=0; k2<count[1];k2++) {
                for(j=0;j<block[1];j++) {
                    if (ALL != mem_selection) {
                        /* locate both elements at their hyperslab-mapped position
                         * (mirrors the addressing in ccdataset_fill) */
                        dataptr = dataset + ((start[0]+k1*stride[0]+i)*space_dim2+
                                             start[1]+k2*stride[1]+j);
                        oriptr = original + ((start[0]+k1*stride[0]+i)*space_dim2+
                                             start[1]+k2*stride[1]+j);
                    }
                    else {
                        /* ALL selection: both buffers are packed sequentially */
                        dataptr = dataset + k;
                        oriptr = original + k;
                        k++;
                    }
                    if (*dataptr != *oriptr){
                        if (vrfyerrs++ < MAX_ERR_REPORT || VERBOSE_MED){
                            /* NOTE: [i][j] are block-local offsets, not absolute
                             * dataset coordinates */
                            HDprintf("Dataset Verify failed at [%lu][%lu]: expect %d, got %d\n",
                                (unsigned long)i, (unsigned long)j,
                                *(oriptr), *(dataptr));
                        }
                    }
                }
            }
        }
    }
    if (vrfyerrs > MAX_ERR_REPORT && !VERBOSE_MED)
        HDprintf("[more errors ...]\n");
    if (vrfyerrs)
        HDprintf("%d errors found in ccdataset_vrfy\n", vrfyerrs);
    return(vrfyerrs);
}
467
468 /*
469 * Example of using the parallel HDF5 library to create two datasets
470 * in one HDF5 file with collective parallel access support.
471 * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1.
472 * Each process controls only a slab of size dim0 x dim1 within each
473 * dataset. [Note: not so yet. Datasets are of sizes dim0xdim1 and
474 * each process controls a hyperslab within.]
475 */
476
477 static void
dataset_big_write(void)478 dataset_big_write(void)
479 {
480
481 hid_t xfer_plist; /* Dataset transfer properties list */
482 hid_t sid; /* Dataspace ID */
483 hid_t file_dataspace; /* File dataspace ID */
484 hid_t mem_dataspace; /* memory dataspace ID */
485 hid_t dataset;
486 hid_t datatype; /* Datatype ID */
487 hsize_t dims[RANK]; /* dataset dim sizes */
488 hsize_t start[RANK]; /* for hyperslab setting */
489 hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
490 hsize_t block[RANK]; /* for hyperslab setting */
491 hsize_t *coords = NULL;
492 int i;
493 herr_t ret; /* Generic return value */
494 hid_t fid; /* HDF5 file ID */
495 hid_t acc_tpl; /* File access templates */
496 hsize_t h;
497 size_t num_points;
498 B_DATATYPE * wdata;
499
500
501 /* allocate memory for data buffer */
502 wdata = (B_DATATYPE *)HDmalloc(bigcount*sizeof(B_DATATYPE));
503 VRFY((wdata != NULL), "wdata malloc succeeded");
504
505 /* setup file access template */
506 acc_tpl = H5Pcreate (H5P_FILE_ACCESS);
507 VRFY((acc_tpl >= 0), "H5P_FILE_ACCESS");
508 H5Pset_fapl_mpio(acc_tpl, MPI_COMM_WORLD, MPI_INFO_NULL);
509
510 /* create the file collectively */
511 fid = H5Fcreate(FILENAME[0], H5F_ACC_TRUNC, H5P_DEFAULT, acc_tpl);
512 VRFY((fid >= 0), "H5Fcreate succeeded");
513
514 /* Release file-access template */
515 ret = H5Pclose(acc_tpl);
516 VRFY((ret >= 0), "");
517
518
519 /* Each process takes a slabs of rows. */
520 if (mpi_rank == 0)
521 HDprintf("\nTesting Dataset1 write by ROW\n");
522 /* Create a large dataset */
523 dims[0] = bigcount;
524 dims[1] = mpi_size;
525
526 sid = H5Screate_simple (RANK, dims, NULL);
527 VRFY((sid >= 0), "H5Screate_simple succeeded");
528 dataset = H5Dcreate2(fid, DATASET1, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
529 VRFY((dataset >= 0), "H5Dcreate2 succeeded");
530 H5Sclose(sid);
531
532 block[0] = dims[0]/mpi_size;
533 block[1] = dims[1];
534 stride[0] = block[0];
535 stride[1] = block[1];
536 count[0] = 1;
537 count[1] = 1;
538 start[0] = mpi_rank*block[0];
539 start[1] = 0;
540
541 /* create a file dataspace independently */
542 file_dataspace = H5Dget_space (dataset);
543 VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
544 ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
545 VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
546
547 /* create a memory dataspace independently */
548 mem_dataspace = H5Screate_simple (RANK, block, NULL);
549 VRFY((mem_dataspace >= 0), "");
550
551 /* fill the local slab with some trivial data */
552 fill_datasets(start, block, wdata);
553 MESG("data_array initialized");
554 if(VERBOSE_MED){
555 MESG("data_array created");
556 dataset_print(start, block, wdata);
557 }
558
559 /* set up the collective transfer properties list */
560 xfer_plist = H5Pcreate (H5P_DATASET_XFER);
561 VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
562 ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
563 VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
564 if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
565 ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
566 VRFY((ret>= 0),"set independent IO collectively succeeded");
567 }
568
569 ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
570 xfer_plist, wdata);
571 VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
572
573 /* release all temporary handles. */
574 H5Sclose(file_dataspace);
575 H5Sclose(mem_dataspace);
576 H5Pclose(xfer_plist);
577
578 ret = H5Dclose(dataset);
579 VRFY((ret >= 0), "H5Dclose1 succeeded");
580
581
582 /* Each process takes a slabs of cols. */
583 if (mpi_rank == 0)
584 HDprintf("\nTesting Dataset2 write by COL\n");
585 /* Create a large dataset */
586 dims[0] = bigcount;
587 dims[1] = mpi_size;
588
589 sid = H5Screate_simple (RANK, dims, NULL);
590 VRFY((sid >= 0), "H5Screate_simple succeeded");
591 dataset = H5Dcreate2(fid, DATASET2, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
592 VRFY((dataset >= 0), "H5Dcreate2 succeeded");
593 H5Sclose(sid);
594
595 block[0] = dims[0];
596 block[1] = dims[1]/mpi_size;
597 stride[0] = block[0];
598 stride[1] = block[1];
599 count[0] = 1;
600 count[1] = 1;
601 start[0] = 0;
602 start[1] = mpi_rank*block[1];
603
604 /* create a file dataspace independently */
605 file_dataspace = H5Dget_space (dataset);
606 VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
607 ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
608 VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
609
610 /* create a memory dataspace independently */
611 mem_dataspace = H5Screate_simple (RANK, block, NULL);
612 VRFY((mem_dataspace >= 0), "");
613
614 /* fill the local slab with some trivial data */
615 fill_datasets(start, block, wdata);
616 MESG("data_array initialized");
617 if(VERBOSE_MED){
618 MESG("data_array created");
619 dataset_print(start, block, wdata);
620 }
621
622 /* set up the collective transfer properties list */
623 xfer_plist = H5Pcreate (H5P_DATASET_XFER);
624 VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
625 ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
626 VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
627 if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
628 ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
629 VRFY((ret>= 0),"set independent IO collectively succeeded");
630 }
631
632 ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
633 xfer_plist, wdata);
634 VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
635
636 /* release all temporary handles. */
637 H5Sclose(file_dataspace);
638 H5Sclose(mem_dataspace);
639 H5Pclose(xfer_plist);
640
641 ret = H5Dclose(dataset);
642 VRFY((ret >= 0), "H5Dclose1 succeeded");
643
644
645
646 /* ALL selection */
647 if (mpi_rank == 0)
648 HDprintf("\nTesting Dataset3 write select ALL proc 0, NONE others\n");
649 /* Create a large dataset */
650 dims[0] = bigcount;
651 dims[1] = 1;
652
653 sid = H5Screate_simple (RANK, dims, NULL);
654 VRFY((sid >= 0), "H5Screate_simple succeeded");
655 dataset = H5Dcreate2(fid, DATASET3, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
656 VRFY((dataset >= 0), "H5Dcreate2 succeeded");
657 H5Sclose(sid);
658
659 /* create a file dataspace independently */
660 file_dataspace = H5Dget_space (dataset);
661 VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
662 if(mpi_rank == 0) {
663 ret = H5Sselect_all(file_dataspace);
664 VRFY((ret >= 0), "H5Sset_all succeeded");
665 }
666 else {
667 ret = H5Sselect_none(file_dataspace);
668 VRFY((ret >= 0), "H5Sset_none succeeded");
669 }
670
671 /* create a memory dataspace independently */
672 mem_dataspace = H5Screate_simple (RANK, dims, NULL);
673 VRFY((mem_dataspace >= 0), "");
674 if(mpi_rank != 0) {
675 ret = H5Sselect_none(mem_dataspace);
676 VRFY((ret >= 0), "H5Sset_none succeeded");
677 }
678
679 /* set up the collective transfer properties list */
680 xfer_plist = H5Pcreate (H5P_DATASET_XFER);
681 VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
682 ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
683 VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
684 if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
685 ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
686 VRFY((ret>= 0),"set independent IO collectively succeeded");
687 }
688
689 /* fill the local slab with some trivial data */
690 fill_datasets(start, dims, wdata);
691 MESG("data_array initialized");
692 if(VERBOSE_MED){
693 MESG("data_array created");
694 }
695
696 ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
697 xfer_plist, wdata);
698 VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
699
700 /* release all temporary handles. */
701 H5Sclose(file_dataspace);
702 H5Sclose(mem_dataspace);
703 H5Pclose(xfer_plist);
704
705 ret = H5Dclose(dataset);
706 VRFY((ret >= 0), "H5Dclose1 succeeded");
707
708 /* Point selection */
709 if (mpi_rank == 0)
710 HDprintf("\nTesting Dataset4 write point selection\n");
711 /* Create a large dataset */
712 dims[0] = bigcount;
713 dims[1] = mpi_size * 4;
714
715 sid = H5Screate_simple (RANK, dims, NULL);
716 VRFY((sid >= 0), "H5Screate_simple succeeded");
717 dataset = H5Dcreate2(fid, DATASET4, H5T_NATIVE_LLONG, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
718 VRFY((dataset >= 0), "H5Dcreate2 succeeded");
719 H5Sclose(sid);
720
721 block[0] = dims[0]/2;
722 block[1] = 2;
723 stride[0] = dims[0]/2;
724 stride[1] = 2;
725 count[0] = 1;
726 count[1] = 1;
727 start[0] = 0;
728 start[1] = dims[1]/mpi_size * mpi_rank;
729
730 num_points = bigcount;
731
732 coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t));
733 VRFY((coords != NULL), "coords malloc succeeded");
734
735 set_coords (start, count, stride, block, num_points, coords, IN_ORDER);
736 /* create a file dataspace */
737 file_dataspace = H5Dget_space (dataset);
738 VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
739 ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
740 VRFY((ret >= 0), "H5Sselect_elements succeeded");
741
742 if(coords) free(coords);
743
744 fill_datasets(start, block, wdata);
745 MESG("data_array initialized");
746 if(VERBOSE_MED){
747 MESG("data_array created");
748 dataset_print(start, block, wdata);
749 }
750
751 /* create a memory dataspace */
752 /* Warning: H5Screate_simple requires an array of hsize_t elements
753 * even if we only pass only a single value. Attempting anything else
754 * appears to cause problems with 32 bit compilers.
755 */
756 mem_dataspace = H5Screate_simple (1, dims, NULL);
757 VRFY((mem_dataspace >= 0), "");
758
759 /* set up the collective transfer properties list */
760 xfer_plist = H5Pcreate (H5P_DATASET_XFER);
761 VRFY((xfer_plist >= 0), "H5Pcreate xfer succeeded");
762 ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
763 VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
764 if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
765 ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
766 VRFY((ret>= 0),"set independent IO collectively succeeded");
767 }
768
769 ret = H5Dwrite(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
770 xfer_plist, wdata);
771 VRFY((ret >= 0), "H5Dwrite dataset1 succeeded");
772
773 /* release all temporary handles. */
774 H5Sclose(file_dataspace);
775 H5Sclose(mem_dataspace);
776 H5Pclose(xfer_plist);
777
778 ret = H5Dclose(dataset);
779 VRFY((ret >= 0), "H5Dclose1 succeeded");
780
781 HDfree(wdata);
782 H5Fclose(fid);
783 }
784
785 /*
786 * Example of using the parallel HDF5 library to read two datasets
787 * in one HDF5 file with collective parallel access support.
788 * The Datasets are of sizes (number-of-mpi-processes x dim0) x dim1.
789 * Each process controls only a slab of size dim0 x dim1 within each
790 * dataset. [Note: not so yet. Datasets are of sizes dim0xdim1 and
791 * each process controls a hyperslab within.]
792 */
793
794 static void
dataset_big_read(void)795 dataset_big_read(void)
796 {
797 hid_t fid; /* HDF5 file ID */
798 hid_t acc_tpl; /* File access templates */
799 hid_t xfer_plist; /* Dataset transfer properties list */
800 hid_t file_dataspace; /* File dataspace ID */
801 hid_t mem_dataspace; /* memory dataspace ID */
802 hid_t dataset;
803 B_DATATYPE *rdata = NULL; /* data buffer */
804 B_DATATYPE *wdata = NULL; /* expected data buffer */
805 hsize_t dims[RANK]; /* dataset dim sizes */
806 hsize_t start[RANK]; /* for hyperslab setting */
807 hsize_t count[RANK], stride[RANK]; /* for hyperslab setting */
808 hsize_t block[RANK]; /* for hyperslab setting */
809 int i,j,k;
810 hsize_t h;
811 size_t num_points;
812 hsize_t *coords = NULL;
813 herr_t ret; /* Generic return value */
814
815 /* allocate memory for data buffer */
816 rdata = (B_DATATYPE *)HDmalloc(bigcount*sizeof(B_DATATYPE));
817 VRFY((rdata != NULL), "rdata malloc succeeded");
818 wdata = (B_DATATYPE *)HDmalloc(bigcount*sizeof(B_DATATYPE));
819 VRFY((wdata != NULL), "wdata malloc succeeded");
820
821 HDmemset(rdata, 0, bigcount*sizeof(B_DATATYPE));
822
823 /* setup file access template */
824 acc_tpl = H5Pcreate (H5P_FILE_ACCESS);
825 VRFY((acc_tpl >= 0), "H5P_FILE_ACCESS");
826 H5Pset_fapl_mpio(acc_tpl, MPI_COMM_WORLD, MPI_INFO_NULL);
827
828 /* open the file collectively */
829 fid=H5Fopen(FILENAME[0],H5F_ACC_RDONLY,acc_tpl);
830 VRFY((fid >= 0), "H5Fopen succeeded");
831
832 /* Release file-access template */
833 ret = H5Pclose(acc_tpl);
834 VRFY((ret >= 0), "");
835
836 if (mpi_rank == 0)
837 HDprintf("\nRead Testing Dataset1 by COL\n");
838
839 dataset = H5Dopen2(fid, DATASET1, H5P_DEFAULT);
840 VRFY((dataset >= 0), "H5Dopen2 succeeded");
841
842 dims[0] = bigcount;
843 dims[1] = mpi_size;
844 /* Each process takes a slabs of cols. */
845 block[0] = dims[0];
846 block[1] = dims[1]/mpi_size;
847 stride[0] = block[0];
848 stride[1] = block[1];
849 count[0] = 1;
850 count[1] = 1;
851 start[0] = 0;
852 start[1] = mpi_rank*block[1];
853
854 /* create a file dataspace independently */
855 file_dataspace = H5Dget_space (dataset);
856 VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
857 ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
858 VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
859
860 /* create a memory dataspace independently */
861 mem_dataspace = H5Screate_simple (RANK, block, NULL);
862 VRFY((mem_dataspace >= 0), "");
863
864 /* fill dataset with test data */
865 fill_datasets(start, block, wdata);
866 MESG("data_array initialized");
867 if(VERBOSE_MED){
868 MESG("data_array created");
869 }
870
871 /* set up the collective transfer properties list */
872 xfer_plist = H5Pcreate (H5P_DATASET_XFER);
873 VRFY((xfer_plist >= 0), "");
874 ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
875 VRFY((ret >= 0), "H5Pcreate xfer succeeded");
876 if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
877 ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
878 VRFY((ret>= 0),"set independent IO collectively succeeded");
879 }
880
881 /* read data collectively */
882 ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
883 xfer_plist, rdata);
884 VRFY((ret >= 0), "H5Dread dataset1 succeeded");
885
886 /* verify the read data with original expected data */
887 ret = verify_data(start, count, stride, block, rdata, wdata);
888 if(ret) {HDfprintf(stderr, "verify failed\n"); exit(1);}
889
890 /* release all temporary handles. */
891 H5Sclose(file_dataspace);
892 H5Sclose(mem_dataspace);
893 H5Pclose(xfer_plist);
894 ret = H5Dclose(dataset);
895 VRFY((ret >= 0), "H5Dclose1 succeeded");
896
897
898 if (mpi_rank == 0)
899 HDprintf("\nRead Testing Dataset2 by ROW\n");
900 HDmemset(rdata, 0, bigcount*sizeof(B_DATATYPE));
901 dataset = H5Dopen2(fid, DATASET2, H5P_DEFAULT);
902 VRFY((dataset >= 0), "H5Dopen2 succeeded");
903
904 dims[0] = bigcount;
905 dims[1] = mpi_size;
906 /* Each process takes a slabs of rows. */
907 block[0] = dims[0]/mpi_size;
908 block[1] = dims[1];
909 stride[0] = block[0];
910 stride[1] = block[1];
911 count[0] = 1;
912 count[1] = 1;
913 start[0] = mpi_rank*block[0];
914 start[1] = 0;
915
916 /* create a file dataspace independently */
917 file_dataspace = H5Dget_space (dataset);
918 VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
919 ret = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
920 VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
921
922 /* create a memory dataspace independently */
923 mem_dataspace = H5Screate_simple (RANK, block, NULL);
924 VRFY((mem_dataspace >= 0), "");
925
926 /* fill dataset with test data */
927 fill_datasets(start, block, wdata);
928 MESG("data_array initialized");
929 if(VERBOSE_MED){
930 MESG("data_array created");
931 }
932
933 /* set up the collective transfer properties list */
934 xfer_plist = H5Pcreate (H5P_DATASET_XFER);
935 VRFY((xfer_plist >= 0), "");
936 ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
937 VRFY((ret >= 0), "H5Pcreate xfer succeeded");
938 if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
939 ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
940 VRFY((ret>= 0),"set independent IO collectively succeeded");
941 }
942
943 /* read data collectively */
944 ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
945 xfer_plist, rdata);
946 VRFY((ret >= 0), "H5Dread dataset2 succeeded");
947
948 /* verify the read data with original expected data */
949 ret = verify_data(start, count, stride, block, rdata, wdata);
950 if(ret) {HDfprintf(stderr, "verify failed\n"); exit(1);}
951
952 /* release all temporary handles. */
953 H5Sclose(file_dataspace);
954 H5Sclose(mem_dataspace);
955 H5Pclose(xfer_plist);
956 ret = H5Dclose(dataset);
957 VRFY((ret >= 0), "H5Dclose1 succeeded");
958
959 if (mpi_rank == 0)
960 HDprintf("\nRead Testing Dataset3 read select ALL proc 0, NONE others\n");
961 HDmemset(rdata, 0, bigcount*sizeof(B_DATATYPE));
962 dataset = H5Dopen2(fid, DATASET3, H5P_DEFAULT);
963 VRFY((dataset >= 0), "H5Dopen2 succeeded");
964
965 dims[0] = bigcount;
966 dims[1] = 1;
967
968 /* create a file dataspace independently */
969 file_dataspace = H5Dget_space (dataset);
970 VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
971 if(mpi_rank == 0) {
972 ret = H5Sselect_all(file_dataspace);
973 VRFY((ret >= 0), "H5Sset_all succeeded");
974 }
975 else {
976 ret = H5Sselect_none(file_dataspace);
977 VRFY((ret >= 0), "H5Sset_none succeeded");
978 }
979
980 /* create a memory dataspace independently */
981 mem_dataspace = H5Screate_simple (RANK, dims, NULL);
982 VRFY((mem_dataspace >= 0), "");
983 if(mpi_rank != 0) {
984 ret = H5Sselect_none(mem_dataspace);
985 VRFY((ret >= 0), "H5Sset_none succeeded");
986 }
987
988 /* fill dataset with test data */
989 fill_datasets(start, dims, wdata);
990 MESG("data_array initialized");
991 if(VERBOSE_MED){
992 MESG("data_array created");
993 }
994
995 /* set up the collective transfer properties list */
996 xfer_plist = H5Pcreate (H5P_DATASET_XFER);
997 VRFY((xfer_plist >= 0), "");
998 ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
999 VRFY((ret >= 0), "H5Pcreate xfer succeeded");
1000 if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
1001 ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
1002 VRFY((ret>= 0),"set independent IO collectively succeeded");
1003 }
1004
1005 /* read data collectively */
1006 ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
1007 xfer_plist, rdata);
1008 VRFY((ret >= 0), "H5Dread dataset3 succeeded");
1009
1010 if(mpi_rank == 0) {
1011 /* verify the read data with original expected data */
1012 ret = verify_data(start, count, stride, block, rdata, wdata);
1013 if(ret) {HDfprintf(stderr, "verify failed\n"); exit(1);}
1014 }
1015
1016 /* release all temporary handles. */
1017 H5Sclose(file_dataspace);
1018 H5Sclose(mem_dataspace);
1019 H5Pclose(xfer_plist);
1020 ret = H5Dclose(dataset);
1021 VRFY((ret >= 0), "H5Dclose1 succeeded");
1022
1023 if (mpi_rank == 0)
1024 HDprintf("\nRead Testing Dataset4 with Point selection\n");
1025 dataset = H5Dopen2(fid, DATASET4, H5P_DEFAULT);
1026 VRFY((dataset >= 0), "H5Dopen2 succeeded");
1027
1028 dims[0] = bigcount;
1029 dims[1] = mpi_size * 4;
1030
1031 block[0] = dims[0]/2;
1032 block[1] = 2;
1033 stride[0] = dims[0]/2;
1034 stride[1] = 2;
1035 count[0] = 1;
1036 count[1] = 1;
1037 start[0] = 0;
1038 start[1] = dims[1]/mpi_size * mpi_rank;
1039
1040 fill_datasets(start, block, wdata);
1041 MESG("data_array initialized");
1042 if(VERBOSE_MED){
1043 MESG("data_array created");
1044 dataset_print(start, block, wdata);
1045 }
1046
1047 num_points = bigcount;
1048
1049 coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t));
1050 VRFY((coords != NULL), "coords malloc succeeded");
1051
1052 set_coords (start, count, stride, block, num_points, coords, IN_ORDER);
1053 /* create a file dataspace */
1054 file_dataspace = H5Dget_space (dataset);
1055 VRFY((file_dataspace >= 0), "H5Dget_space succeeded");
1056 ret = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
1057 VRFY((ret >= 0), "H5Sselect_elements succeeded");
1058
1059 if(coords) HDfree(coords);
1060
1061 /* create a memory dataspace */
1062 /* Warning: H5Screate_simple requires an array of hsize_t elements
1063 * even if we only pass only a single value. Attempting anything else
1064 * appears to cause problems with 32 bit compilers.
1065 */
1066 mem_dataspace = H5Screate_simple (1, dims, NULL);
1067 VRFY((mem_dataspace >= 0), "");
1068
1069 /* set up the collective transfer properties list */
1070 xfer_plist = H5Pcreate (H5P_DATASET_XFER);
1071 VRFY((xfer_plist >= 0), "");
1072 ret = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
1073 VRFY((ret >= 0), "H5Pcreate xfer succeeded");
1074 if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
1075 ret = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
1076 VRFY((ret>= 0),"set independent IO collectively succeeded");
1077 }
1078
1079 /* read data collectively */
1080 ret = H5Dread(dataset, H5T_NATIVE_LLONG, mem_dataspace, file_dataspace,
1081 xfer_plist, rdata);
1082 VRFY((ret >= 0), "H5Dread dataset1 succeeded");
1083
1084 ret = verify_data(start, count, stride, block, rdata, wdata);
1085 if(ret) {HDfprintf(stderr, "verify failed\n"); exit(1);}
1086
1087 /* release all temporary handles. */
1088 H5Sclose(file_dataspace);
1089 H5Sclose(mem_dataspace);
1090 H5Pclose(xfer_plist);
1091 ret = H5Dclose(dataset);
1092 VRFY((ret >= 0), "H5Dclose1 succeeded");
1093
1094 HDfree(wdata);
1095 HDfree(rdata);
1096
1097 wdata = NULL;
1098 rdata = NULL;
1099 /* We never wrote Dataset5 in the write section, so we can't
1100 * expect to read it...
1101 */
1102 file_dataspace = -1;
1103 mem_dataspace = -1;
1104 xfer_plist = -1;
1105 dataset = -1;
1106
1107 /* release all temporary handles. */
1108 if (file_dataspace != -1) H5Sclose(file_dataspace);
1109 if (mem_dataspace != -1) H5Sclose(mem_dataspace);
1110 if (xfer_plist != -1) H5Pclose(xfer_plist);
1111 if (dataset != -1) {
1112 ret = H5Dclose(dataset);
1113 VRFY((ret >= 0), "H5Dclose1 succeeded");
1114 }
1115 H5Fclose(fid);
1116
1117 /* release data buffers */
1118 if(rdata) HDfree(rdata);
1119 if(wdata) HDfree(wdata);
1120
1121 } /* dataset_large_readAll */
1122
1123
1124 /*
1125 * Create the appropriate File access property list
1126 */
1127 hid_t
create_faccess_plist(MPI_Comm comm,MPI_Info info,int l_facc_type)1128 create_faccess_plist(MPI_Comm comm, MPI_Info info, int l_facc_type)
1129 {
1130 hid_t ret_pl = -1;
1131 herr_t ret; /* generic return value */
1132 int mpi_rank; /* mpi variables */
1133
1134 /* need the rank for error checking macros */
1135 MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
1136
1137 ret_pl = H5Pcreate (H5P_FILE_ACCESS);
1138 VRFY((ret_pl >= 0), "H5P_FILE_ACCESS");
1139
1140 if (l_facc_type == FACC_DEFAULT)
1141 return (ret_pl);
1142
1143 if (l_facc_type == FACC_MPIO){
1144 /* set Parallel access with communicator */
1145 ret = H5Pset_fapl_mpio(ret_pl, comm, info);
1146 VRFY((ret >= 0), "");
1147 ret = H5Pset_all_coll_metadata_ops(ret_pl, TRUE);
1148 VRFY((ret >= 0), "");
1149 ret = H5Pset_coll_metadata_write(ret_pl, TRUE);
1150 VRFY((ret >= 0), "");
1151 return(ret_pl);
1152 }
1153
1154 if (l_facc_type == (FACC_MPIO | FACC_SPLIT)){
1155 hid_t mpio_pl;
1156
1157 mpio_pl = H5Pcreate (H5P_FILE_ACCESS);
1158 VRFY((mpio_pl >= 0), "");
1159 /* set Parallel access with communicator */
1160 ret = H5Pset_fapl_mpio(mpio_pl, comm, info);
1161 VRFY((ret >= 0), "");
1162
1163 /* setup file access template */
1164 ret_pl = H5Pcreate (H5P_FILE_ACCESS);
1165 VRFY((ret_pl >= 0), "");
1166 /* set Parallel access with communicator */
1167 ret = H5Pset_fapl_split(ret_pl, ".meta", mpio_pl, ".raw", mpio_pl);
1168 VRFY((ret >= 0), "H5Pset_fapl_split succeeded");
1169 H5Pclose(mpio_pl);
1170 return(ret_pl);
1171 }
1172
1173 /* unknown file access types */
1174 return (ret_pl);
1175 }
1176
1177
1178 /*-------------------------------------------------------------------------
1179 * Function: coll_chunk1
1180 *
1181 * Purpose: Wrapper to test the collective chunk IO for regular JOINT
1182 selection with a single chunk
1183 *
1184 * Return: Success: 0
1185 *
1186 * Failure: -1
1187 *
1188 * Programmer: Unknown
1189 * July 12th, 2004
1190 *
1191 * Modifications:
1192 *
1193 *-------------------------------------------------------------------------
1194 */
1195
1196 /* ------------------------------------------------------------------------
1197 * Descriptions for the selection: One big singular selection inside one chunk
1198 * Two dimensions,
1199 *
1200 * dim1 = space_dim1(5760)*mpi_size
1201 * dim2 = space_dim2(3)
1202 * chunk_dim1 = dim1
1203 * chunk_dim2 = dim2
1204 * block = 1 for all dimensions
1205 * stride = 1 for all dimensions
1206 * count0 = space_dim1(5760)
1207 * count1 = space_dim2(3)
1208 * start0 = mpi_rank*space_dim1
1209 * start1 = 0
1210 * ------------------------------------------------------------------------
1211 */
1212
1213 void
coll_chunk1(void)1214 coll_chunk1(void)
1215 {
1216 const char *filename = FILENAME[0];
1217 if (mpi_rank == 0)
1218 HDprintf("coll_chunk1\n");
1219
1220 coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
1221 coll_chunktest(filename, 1, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
1222 coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
1223 coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
1224 coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
1225
1226 coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER);
1227 coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER);
1228 coll_chunktest(filename, 1, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER);
1229 }
1230
1231
1232 /*-------------------------------------------------------------------------
1233 * Function: coll_chunk2
1234 *
1235 * Purpose: Wrapper to test the collective chunk IO for regular DISJOINT
1236 selection with a single chunk
1237 *
1238 * Return: Success: 0
1239 *
1240 * Failure: -1
1241 *
1242 * Programmer: Unknown
1243 * July 12th, 2004
1244 *
1245 * Modifications:
1246 *
1247 *-------------------------------------------------------------------------
1248 */
1249
1250 /* ------------------------------------------------------------------------
1251 * Descriptions for the selection: many disjoint selections inside one chunk
1252 * Two dimensions,
1253 *
1254 * dim1 = space_dim1*mpi_size(5760)
1255 * dim2 = space_dim2(3)
1256 * chunk_dim1 = dim1
1257 * chunk_dim2 = dim2
1258 * block = 1 for all dimensions
1259 * stride = 3 for all dimensions
1260 * count0 = space_dim1/stride0(5760/3)
1261 * count1 = space_dim2/stride(3/3 = 1)
1262 * start0 = mpi_rank*space_dim1
1263 * start1 = 0
1264 *
1265 * ------------------------------------------------------------------------
1266 */
1267 void
coll_chunk2(void)1268 coll_chunk2(void)
1269 {
1270 const char *filename = FILENAME[0];
1271 if (mpi_rank == 0)
1272 HDprintf("coll_chunk2\n");
1273
1274 coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
1275 coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
1276 coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
1277 coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
1278 coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
1279
1280 coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, ALL, IN_ORDER);
1281 coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, POINT, IN_ORDER);
1282 coll_chunktest(filename, 1, BYROW_DISCONT, API_NONE, POINT, HYPER, IN_ORDER);
1283 }
1284
1285
1286 /*-------------------------------------------------------------------------
1287 * Function: coll_chunk3
1288 *
1289 * Purpose: Wrapper to test the collective chunk IO for regular JOINT
1290 selection with at least number of 2*mpi_size chunks
1291 *
1292 * Return: Success: 0
1293 *
1294 * Failure: -1
1295 *
1296 * Programmer: Unknown
1297 * July 12th, 2004
1298 *
1299 * Modifications:
1300 *
1301 *-------------------------------------------------------------------------
1302 */
1303
1304 /* ------------------------------------------------------------------------
1305 * Descriptions for the selection: one singular selection across many chunks
1306 * Two dimensions, Num of chunks = 2* mpi_size
1307 *
1308 * dim1 = space_dim1*mpi_size
1309 * dim2 = space_dim2(3)
1310 * chunk_dim1 = space_dim1
1311 * chunk_dim2 = dim2/2
1312 * block = 1 for all dimensions
1313 * stride = 1 for all dimensions
1314 * count0 = space_dim1
1315 * count1 = space_dim2(3)
1316 * start0 = mpi_rank*space_dim1
1317 * start1 = 0
1318 *
1319 * ------------------------------------------------------------------------
1320 */
1321
1322 void
coll_chunk3(void)1323 coll_chunk3(void)
1324 {
1325 const char *filename = FILENAME[0];
1326 if (mpi_rank == 0)
1327 HDprintf("coll_chunk3\n");
1328
1329 coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, HYPER, HYPER, OUT_OF_ORDER);
1330 coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, HYPER, POINT, OUT_OF_ORDER);
1331 coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, ALL, OUT_OF_ORDER);
1332 coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, POINT, OUT_OF_ORDER);
1333 coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, HYPER, OUT_OF_ORDER);
1334
1335 coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, ALL, IN_ORDER);
1336 coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, POINT, IN_ORDER);
1337 coll_chunktest(filename, mpi_size, BYROW_CONT, API_NONE, POINT, HYPER, IN_ORDER);
1338 }
1339
1340
1341 //-------------------------------------------------------------------------
1342 // Borrowed/Modified (slightly) from t_coll_chunk.c
1343 /*-------------------------------------------------------------------------
1344 * Function: coll_chunktest
1345 *
1346 * Purpose: The real testing routine for regular selection of collective
1347 chunking storage
1348 testing both write and read,
1349 If anything fails, it may be read or write. There is no
1350 separation test between read and write.
1351 *
1352 * Return: Success: 0
1353 *
1354 * Failure: -1
1355 *
1356 * Modifications:
1357 * Remove invalid temporary property checkings for API_LINK_HARD and
1358 * API_LINK_TRUE cases.
1359 * Programmer: Jonathan Kim
1360 * Date: 2012-10-10
1361 *
1362 * Programmer: Unknown
1363 * July 12th, 2004
1364 *
1365 * Modifications:
1366 *
1367 *-------------------------------------------------------------------------
1368 */
1369
1370 static void
coll_chunktest(const char * filename,int chunk_factor,int select_factor,int api_option,int file_selection,int mem_selection,int mode)1371 coll_chunktest(const char* filename,
1372 int chunk_factor,
1373 int select_factor,
1374 int api_option,
1375 int file_selection,
1376 int mem_selection,
1377 int mode)
1378 {
1379 hid_t file, dataset, file_dataspace, mem_dataspace;
1380 hid_t acc_plist,xfer_plist,crp_plist;
1381
1382 hsize_t dims[RANK], chunk_dims[RANK];
1383 int* data_array1 = NULL;
1384 int* data_origin1 = NULL;
1385
1386 hsize_t start[RANK],count[RANK],stride[RANK],block[RANK];
1387
1388 #ifdef H5_HAVE_INSTRUMENTED_LIBRARY
1389 unsigned prop_value;
1390 #endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
1391
1392 herr_t status;
1393 MPI_Comm comm = MPI_COMM_WORLD;
1394 MPI_Info info = MPI_INFO_NULL;
1395
1396 size_t num_points; /* for point selection */
1397 hsize_t *coords = NULL; /* for point selection */
1398 int i;
1399
1400 /* Create the data space */
1401
1402 acc_plist = create_faccess_plist(comm,info,facc_type);
1403 VRFY((acc_plist >= 0),"");
1404
1405 file = H5Fcreate(filename,H5F_ACC_TRUNC,H5P_DEFAULT,acc_plist);
1406 VRFY((file >= 0),"H5Fcreate succeeded");
1407
1408 status = H5Pclose(acc_plist);
1409 VRFY((status >= 0),"");
1410
1411 /* setup dimensionality object */
1412 dims[0] = space_dim1*mpi_size;
1413 dims[1] = space_dim2;
1414
1415 /* allocate memory for data buffer */
1416 data_array1 = (int *)HDmalloc(dims[0] * dims[1] * sizeof(int));
1417 VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
1418
1419 /* set up dimensions of the slab this process accesses */
1420 ccslab_set(mpi_rank, mpi_size, start, count, stride, block, select_factor);
1421
1422 /* set up the coords array selection */
1423 num_points = block[0] * block[1] * count[0] * count[1];
1424 coords = (hsize_t *)HDmalloc(num_points * RANK * sizeof(hsize_t));
1425 VRFY((coords != NULL), "coords malloc succeeded");
1426 point_set(start, count, stride, block, num_points, coords, mode);
1427
1428 /* Warning: H5Screate_simple requires an array of hsize_t elements
1429 * even if we only pass only a single value. Attempting anything else
1430 * appears to cause problems with 32 bit compilers.
1431 */
1432 file_dataspace = H5Screate_simple(2, dims, NULL);
1433 VRFY((file_dataspace >= 0), "file dataspace created succeeded");
1434
1435 if(ALL != mem_selection) {
1436 mem_dataspace = H5Screate_simple(2, dims, NULL);
1437 VRFY((mem_dataspace >= 0), "mem dataspace created succeeded");
1438 }
1439 else {
1440 /* Putting the warning about H5Screate_simple (above) into practice... */
1441 hsize_t dsdims[1] = {num_points};
1442 mem_dataspace = H5Screate_simple (1, dsdims, NULL);
1443 VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
1444 }
1445
1446 crp_plist = H5Pcreate(H5P_DATASET_CREATE);
1447 VRFY((crp_plist >= 0),"");
1448
1449 /* Set up chunk information. */
1450 chunk_dims[0] = dims[0]/chunk_factor;
1451
1452 /* to decrease the testing time, maintain bigger chunk size */
1453 (chunk_factor == 1) ? (chunk_dims[1] = space_dim2) : (chunk_dims[1] = space_dim2/2);
1454 status = H5Pset_chunk(crp_plist, 2, chunk_dims);
1455 VRFY((status >= 0),"chunk creation property list succeeded");
1456
1457 dataset = H5Dcreate2(file, DSET_COLLECTIVE_CHUNK_NAME, H5T_NATIVE_INT,
1458 file_dataspace, H5P_DEFAULT, crp_plist, H5P_DEFAULT);
1459 VRFY((dataset >= 0),"dataset created succeeded");
1460
1461 status = H5Pclose(crp_plist);
1462 VRFY((status >= 0), "");
1463
1464 /*put some trivial data in the data array */
1465 ccdataset_fill(start, stride, count,block, data_array1, mem_selection);
1466
1467 MESG("data_array initialized");
1468
1469 switch (file_selection) {
1470 case HYPER:
1471 status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
1472 VRFY((status >= 0),"hyperslab selection succeeded");
1473 break;
1474
1475 case POINT:
1476 if (num_points) {
1477 status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
1478 VRFY((status >= 0),"Element selection succeeded");
1479 }
1480 else {
1481 status = H5Sselect_none(file_dataspace);
1482 VRFY((status >= 0),"none selection succeeded");
1483 }
1484 break;
1485
1486 case ALL:
1487 status = H5Sselect_all(file_dataspace);
1488 VRFY((status >= 0), "H5Sselect_all succeeded");
1489 break;
1490 }
1491
1492 switch (mem_selection) {
1493 case HYPER:
1494 status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
1495 VRFY((status >= 0),"hyperslab selection succeeded");
1496 break;
1497
1498 case POINT:
1499 if (num_points) {
1500 status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
1501 VRFY((status >= 0),"Element selection succeeded");
1502 }
1503 else {
1504 status = H5Sselect_none(mem_dataspace);
1505 VRFY((status >= 0),"none selection succeeded");
1506 }
1507 break;
1508
1509 case ALL:
1510 status = H5Sselect_all(mem_dataspace);
1511 VRFY((status >= 0), "H5Sselect_all succeeded");
1512 break;
1513 }
1514
1515 /* set up the collective transfer property list */
1516 xfer_plist = H5Pcreate(H5P_DATASET_XFER);
1517 VRFY((xfer_plist >= 0), "");
1518
1519 status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
1520 VRFY((status>= 0),"MPIO collective transfer property succeeded");
1521 if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
1522 status = H5Pset_dxpl_mpio_collective_opt(xfer_plist, H5FD_MPIO_INDIVIDUAL_IO);
1523 VRFY((status>= 0),"set independent IO collectively succeeded");
1524 }
1525
1526 switch(api_option){
1527 case API_LINK_HARD:
1528 status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist,H5FD_MPIO_CHUNK_ONE_IO);
1529 VRFY((status>= 0),"collective chunk optimization succeeded");
1530 break;
1531
1532 case API_MULTI_HARD:
1533 status = H5Pset_dxpl_mpio_chunk_opt(xfer_plist,H5FD_MPIO_CHUNK_MULTI_IO);
1534 VRFY((status>= 0),"collective chunk optimization succeeded ");
1535 break;
1536
1537 case API_LINK_TRUE:
1538 status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,2);
1539 VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
1540 break;
1541
1542 case API_LINK_FALSE:
1543 status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,6);
1544 VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
1545 break;
1546
1547 case API_MULTI_COLL:
1548 status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,8);/* make sure it is using multi-chunk IO */
1549 VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
1550 status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist,50);
1551 VRFY((status>= 0),"collective chunk optimization set chunk ratio succeeded");
1552 break;
1553
1554 case API_MULTI_IND:
1555 status = H5Pset_dxpl_mpio_chunk_opt_num(xfer_plist,8);/* make sure it is using multi-chunk IO */
1556 VRFY((status>= 0),"collective chunk optimization set chunk number succeeded");
1557 status = H5Pset_dxpl_mpio_chunk_opt_ratio(xfer_plist,100);
1558 VRFY((status>= 0),"collective chunk optimization set chunk ratio succeeded");
1559 break;
1560
1561 default:
1562 ;
1563 }
1564
1565 #ifdef H5_HAVE_INSTRUMENTED_LIBRARY
1566 if(facc_type == FACC_MPIO) {
1567 switch(api_option) {
1568 case API_LINK_HARD:
1569 prop_value = H5D_XFER_COLL_CHUNK_DEF;
1570 status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
1571 NULL, NULL, NULL, NULL, NULL, NULL);
1572 VRFY((status >= 0),"testing property list inserted succeeded");
1573 break;
1574
1575 case API_MULTI_HARD:
1576 prop_value = H5D_XFER_COLL_CHUNK_DEF;
1577 status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
1578 NULL, NULL, NULL, NULL, NULL, NULL);
1579 VRFY((status >= 0),"testing property list inserted succeeded");
1580 break;
1581
1582 case API_LINK_TRUE:
1583 prop_value = H5D_XFER_COLL_CHUNK_DEF;
1584 status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
1585 NULL, NULL, NULL, NULL, NULL, NULL);
1586 VRFY((status >= 0),"testing property list inserted succeeded");
1587 break;
1588
1589 case API_LINK_FALSE:
1590 prop_value = H5D_XFER_COLL_CHUNK_DEF;
1591 status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
1592 NULL, NULL, NULL, NULL, NULL, NULL);
1593 VRFY((status >= 0),"testing property list inserted succeeded");
1594 break;
1595
1596 case API_MULTI_COLL:
1597 prop_value = H5D_XFER_COLL_CHUNK_DEF;
1598 status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
1599 NULL, NULL, NULL, NULL, NULL, NULL);
1600 VRFY((status >= 0),"testing property list inserted succeeded");
1601 break;
1602
1603 case API_MULTI_IND:
1604 prop_value = H5D_XFER_COLL_CHUNK_DEF;
1605 status = H5Pinsert2(xfer_plist, H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME, H5D_XFER_COLL_CHUNK_SIZE, &prop_value,
1606 NULL, NULL, NULL, NULL, NULL, NULL);
1607 VRFY((status >= 0),"testing property list inserted succeeded");
1608 break;
1609
1610 default:
1611 ;
1612 }
1613 }
1614 #endif
1615
1616 /* write data collectively */
1617 status = H5Dwrite(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
1618 xfer_plist, data_array1);
1619 VRFY((status >= 0),"dataset write succeeded");
1620
1621 #ifdef H5_HAVE_INSTRUMENTED_LIBRARY
1622 if(facc_type == FACC_MPIO) {
1623 switch(api_option){
1624 case API_LINK_HARD:
1625 status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_HARD_NAME,&prop_value);
1626 VRFY((status >= 0),"testing property list get succeeded");
1627 VRFY((prop_value == 0),"API to set LINK COLLECTIVE IO directly succeeded");
1628 break;
1629
1630 case API_MULTI_HARD:
1631 status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_HARD_NAME,&prop_value);
1632 VRFY((status >= 0),"testing property list get succeeded");
1633 VRFY((prop_value == 0),"API to set MULTI-CHUNK COLLECTIVE IO optimization succeeded");
1634 break;
1635
1636 case API_LINK_TRUE:
1637 status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_NUM_TRUE_NAME,&prop_value);
1638 VRFY((status >= 0),"testing property list get succeeded");
1639 VRFY((prop_value == 0),"API to set LINK COLLECTIVE IO succeeded");
1640 break;
1641
1642 case API_LINK_FALSE:
1643 status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_LINK_NUM_FALSE_NAME,&prop_value);
1644 VRFY((status >= 0),"testing property list get succeeded");
1645 VRFY((prop_value == 0),"API to set LINK IO transferring to multi-chunk IO succeeded");
1646 break;
1647
1648 case API_MULTI_COLL:
1649 status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_RATIO_COLL_NAME,&prop_value);
1650 VRFY((status >= 0),"testing property list get succeeded");
1651 VRFY((prop_value == 0),"API to set MULTI-CHUNK COLLECTIVE IO with optimization succeeded");
1652 break;
1653
1654 case API_MULTI_IND:
1655 status = H5Pget(xfer_plist,H5D_XFER_COLL_CHUNK_MULTI_RATIO_IND_NAME,&prop_value);
1656 VRFY((status >= 0),"testing property list get succeeded");
1657 VRFY((prop_value == 0),"API to set MULTI-CHUNK IO transferring to independent IO succeeded");
1658 break;
1659
1660 default:
1661 ;
1662 }
1663 }
1664 #endif
1665
1666 status = H5Dclose(dataset);
1667 VRFY((status >= 0),"");
1668
1669 status = H5Pclose(xfer_plist);
1670 VRFY((status >= 0),"property list closed");
1671
1672 status = H5Sclose(file_dataspace);
1673 VRFY((status >= 0),"");
1674
1675 status = H5Sclose(mem_dataspace);
1676 VRFY((status >= 0),"");
1677
1678
1679 status = H5Fclose(file);
1680 VRFY((status >= 0),"");
1681
1682 if (data_array1) HDfree(data_array1);
1683
1684 /* Use collective read to verify the correctness of collective write. */
1685
1686 /* allocate memory for data buffer */
1687 data_array1 = (int *)HDmalloc(dims[0]*dims[1]*sizeof(int));
1688 VRFY((data_array1 != NULL), "data_array1 malloc succeeded");
1689
1690 /* allocate memory for data buffer */
1691 data_origin1 = (int *)HDmalloc(dims[0]*dims[1]*sizeof(int));
1692 VRFY((data_origin1 != NULL), "data_origin1 malloc succeeded");
1693
1694 acc_plist = create_faccess_plist(comm, info, facc_type);
1695 VRFY((acc_plist >= 0),"MPIO creation property list succeeded");
1696
1697 file = H5Fopen(FILENAME[0],H5F_ACC_RDONLY,acc_plist);
1698 VRFY((file >= 0),"H5Fcreate succeeded");
1699
1700 status = H5Pclose(acc_plist);
1701 VRFY((status >= 0),"");
1702
1703 /* open the collective dataset*/
1704 dataset = H5Dopen2(file, DSET_COLLECTIVE_CHUNK_NAME, H5P_DEFAULT);
1705 VRFY((dataset >= 0), "");
1706
1707 /* set up dimensions of the slab this process accesses */
1708 ccslab_set(mpi_rank, mpi_size, start, count, stride, block, select_factor);
1709
1710 /* obtain the file and mem dataspace*/
1711 file_dataspace = H5Dget_space (dataset);
1712 VRFY((file_dataspace >= 0), "");
1713
1714 if (ALL != mem_selection) {
1715 mem_dataspace = H5Dget_space (dataset);
1716 VRFY((mem_dataspace >= 0), "");
1717 }
1718 else {
1719 /* Warning: H5Screate_simple requires an array of hsize_t elements
1720 * even if we only pass only a single value. Attempting anything else
1721 * appears to cause problems with 32 bit compilers.
1722 */
1723 hsize_t dsdims[1] = {num_points};
1724 mem_dataspace = H5Screate_simple (1, dsdims, NULL);
1725 VRFY((mem_dataspace >= 0), "mem_dataspace create succeeded");
1726 }
1727
1728 switch (file_selection) {
1729 case HYPER:
1730 status = H5Sselect_hyperslab(file_dataspace, H5S_SELECT_SET, start, stride, count, block);
1731 VRFY((status >= 0),"hyperslab selection succeeded");
1732 break;
1733
1734 case POINT:
1735 if (num_points) {
1736 status = H5Sselect_elements(file_dataspace, H5S_SELECT_SET, num_points, coords);
1737 VRFY((status >= 0),"Element selection succeeded");
1738 }
1739 else {
1740 status = H5Sselect_none(file_dataspace);
1741 VRFY((status >= 0),"none selection succeeded");
1742 }
1743 break;
1744
1745 case ALL:
1746 status = H5Sselect_all(file_dataspace);
1747 VRFY((status >= 0), "H5Sselect_all succeeded");
1748 break;
1749 }
1750
1751 switch (mem_selection) {
1752 case HYPER:
1753 status = H5Sselect_hyperslab(mem_dataspace, H5S_SELECT_SET, start, stride, count, block);
1754 VRFY((status >= 0),"hyperslab selection succeeded");
1755 break;
1756
1757 case POINT:
1758 if (num_points) {
1759 status = H5Sselect_elements(mem_dataspace, H5S_SELECT_SET, num_points, coords);
1760 VRFY((status >= 0),"Element selection succeeded");
1761 }
1762 else {
1763 status = H5Sselect_none(mem_dataspace);
1764 VRFY((status >= 0),"none selection succeeded");
1765 }
1766 break;
1767
1768 case ALL:
1769 status = H5Sselect_all(mem_dataspace);
1770 VRFY((status >= 0), "H5Sselect_all succeeded");
1771 break;
1772 }
1773
1774 /* fill dataset with test data */
1775 ccdataset_fill(start, stride,count,block, data_origin1, mem_selection);
1776 xfer_plist = H5Pcreate (H5P_DATASET_XFER);
1777 VRFY((xfer_plist >= 0),"");
1778
1779 status = H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
1780 VRFY((status>= 0),"MPIO collective transfer property succeeded");
1781 if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
1782 status = H5Pset_dxpl_mpio_collective_opt(xfer_plist,H5FD_MPIO_INDIVIDUAL_IO);
1783 VRFY((status>= 0),"set independent IO collectively succeeded");
1784 }
1785
1786 status = H5Dread(dataset, H5T_NATIVE_INT, mem_dataspace, file_dataspace,
1787 xfer_plist, data_array1);
1788 VRFY((status >=0),"dataset read succeeded");
1789
1790 /* verify the read data with original expected data */
1791 status = ccdataset_vrfy(start, count, stride, block, data_array1, data_origin1, mem_selection);
1792 if (status) nerrors++;
1793
1794 status = H5Pclose(xfer_plist);
1795 VRFY((status >= 0),"property list closed");
1796
1797 /* close dataset collectively */
1798 status=H5Dclose(dataset);
1799 VRFY((status >= 0), "H5Dclose");
1800
1801 /* release all IDs created */
1802 status = H5Sclose(file_dataspace);
1803 VRFY((status >= 0),"H5Sclose");
1804
1805 status = H5Sclose(mem_dataspace);
1806 VRFY((status >= 0),"H5Sclose");
1807
1808 /* close the file collectively */
1809 status = H5Fclose(file);
1810 VRFY((status >= 0),"H5Fclose");
1811
1812 /* release data buffers */
1813 if(coords) HDfree(coords);
1814 if(data_array1) HDfree(data_array1);
1815 if(data_origin1) HDfree(data_origin1);
1816
1817 }
1818
1819
1820
1821 /*****************************************************************************
1822 *
1823 * Function: do_express_test()
1824 *
1825 * Purpose: Do an MPI_Allreduce to obtain the maximum value returned
1826 * by GetTestExpress() across all processes. Return this
1827 * value.
1828 *
1829 *              Environment variables can be different across different
1830 * processes. This function ensures that all processes agree
1831 * on whether to do an express test.
1832 *
1833 * Return: Success: Maximum of the values returned by
1834 * GetTestExpress() across all processes.
1835 *
1836 * Failure: -1
1837 *
1838 * Programmer: JRM -- 4/25/06
1839 *
1840 *****************************************************************************/
1841 static int
do_express_test(int world_mpi_rank)1842 do_express_test(int world_mpi_rank)
1843 {
1844 int express_test;
1845 int max_express_test;
1846 int result;
1847
1848 express_test = GetTestExpress();
1849
1850 result = MPI_Allreduce((void *)&express_test,
1851 (void *)&max_express_test,
1852 1,
1853 MPI_INT,
1854 MPI_MAX,
1855 MPI_COMM_WORLD);
1856
1857 if ( result != MPI_SUCCESS ) {
1858 nerrors++;
1859 max_express_test = -1;
1860 if ( VERBOSE_MED && (world_mpi_rank == 0)) {
1861 HDfprintf(stdout, "%d:%s: MPI_Allreduce() failed.\n",
1862 world_mpi_rank, FUNC );
1863 }
1864 }
1865
1866 return(max_express_test);
1867
1868 } /* do_express_test() */
1869
1870
main(int argc,char ** argv)1871 int main(int argc, char **argv)
1872 {
1873 int ExpressMode = 0;
1874 hsize_t newsize = 1048576;
1875 /* Set the bigio processing limit to be 'newsize' bytes */
1876 hsize_t oldsize = H5S_mpio_set_bigio_count(newsize);
1877
1878 /* Having set the bigio handling to a size that is managable,
1879 * we'll set our 'bigcount' variable to be 2X that limit so
1880 * that we try to ensure that our bigio handling is actually
1881 * envoked and tested.
1882 */
1883 if (newsize != oldsize) {
1884 bigcount = newsize * 2;
1885 }
1886
1887 MPI_Init(&argc, &argv);
1888 MPI_Comm_size(MPI_COMM_WORLD,&mpi_size);
1889 MPI_Comm_rank(MPI_COMM_WORLD,&mpi_rank);
1890
1891 /* Attempt to turn off atexit post processing so that in case errors
1892 * happen during the test and the process is aborted, it will not get
1893 * hang in the atexit post processing in which it may try to make MPI
1894 * calls. By then, MPI calls may not work.
1895 */
1896 if (H5dont_atexit() < 0){
1897 HDprintf("Failed to turn off atexit processing. Continue.\n");
1898 };
1899
1900 /* set alarm. */
1901 ALARM_ON;
1902
1903 ExpressMode = do_express_test(mpi_rank);
1904
1905 dataset_big_write();
1906 MPI_Barrier(MPI_COMM_WORLD);
1907
1908 dataset_big_read();
1909 MPI_Barrier(MPI_COMM_WORLD);
1910
1911 if (ExpressMode > 0) {
1912 if (mpi_rank == 0)
1913 HDprintf("***Express test mode on. Several tests are skipped\n");
1914 }
1915 else {
1916 coll_chunk1();
1917 MPI_Barrier(MPI_COMM_WORLD);
1918 coll_chunk2();
1919 MPI_Barrier(MPI_COMM_WORLD);
1920 coll_chunk3();
1921 }
1922
1923 /* turn off alarm */
1924 ALARM_OFF;
1925
1926 if (mpi_rank == 0)
1927 HDremove(FILENAME[0]);
1928
1929 /* close HDF5 library */
1930 H5close();
1931
1932 MPI_Finalize();
1933
1934 return 0;
1935 }
1936
1937