1 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
2  * Copyright by The HDF Group.                                               *
3  * Copyright by the Board of Trustees of the University of Illinois.         *
4  * All rights reserved.                                                      *
5  *                                                                           *
6  * This file is part of HDF5.  The full HDF5 copyright notice, including     *
7  * terms governing use, modification, and redistribution, is contained in    *
8  * the COPYING file, which can be found at the root of the source code       *
9  * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.  *
10  * If you do not have access to either file, you may request a copy from     *
11  * help@hdfgroup.org.                                                        *
12  * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
13 
14 #include "testphdf5.h"
15 #include "H5Dprivate.h"
16 
17 #define DIM  2
18 #define SIZE 32
19 #define NDATASET 4
20 #define GROUP_DEPTH 128
21 enum obj_type { is_group, is_dset };
22 
23 
24 static int  get_size(void);
25 static void write_dataset(hid_t, hid_t, hid_t);
26 static int  read_dataset(hid_t, hid_t, hid_t);
27 static void create_group_recursive(hid_t, hid_t, hid_t, int);
28 static void recursive_read_group(hid_t, hid_t, hid_t, int);
29 static void group_dataset_read(hid_t fid, int mpi_rank, int m);
30 static void write_attribute(hid_t, int, int);
31 static int  read_attribute(hid_t, int, int);
32 static int  check_value(DATATYPE *, DATATYPE *, int);
33 static void get_slab(hsize_t[], hsize_t[], hsize_t[], hsize_t[], int);
34 
35 
36 /*
37  * The size value computed by this function is used extensively in
38  * configuring tests for the current number of processes.
39  *
40  * This function was created as part of an effort to allow the
41  * test functions in this file to run on an arbitrary number of
42  * processors.
43  *                                       JRM - 8/11/04
44  */
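/*
 * For illustration: with SIZE == 32 this returns 32 for up to 32 processes,
 * 34 for 33 processes, and 48 for 48 processes, i.e. max(SIZE, mpi_size)
 * rounded up to the next even number.
 */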
45 
46 static int
get_size(void)
48 {
49     int mpi_rank;
50     int mpi_size;
51     int size = SIZE;
52 
53     MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); /* needed for VRFY */
54     MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
55 
56     if(mpi_size > size ) {
57         if((mpi_size % 2) == 0 ) {
58             size = mpi_size;
59         } else {
60             size = mpi_size + 1;
61         }
62     }
63 
64     VRFY((mpi_size <= size), "mpi_size <= size");
65     VRFY(((size % 2) == 0), "size isn't even");
66 
67     return(size);
68 
69 } /* get_size() */
70 
71 /*
72  * Example of using PHDF5 to create a zero sized dataset.
73  *
74  */
void zero_dim_dset(void)
76 {
77     int          mpi_size, mpi_rank;
78     const char    *filename;
79     hid_t        fid, plist, dcpl, dsid, sid;
80     hsize_t      dim, chunk_dim;
81     herr_t       ret;
82     int          data[1];
83 
84     MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
85     MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
86 
87     filename = GetTestParameters();
88 
89     plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
90     VRFY((plist>=0), "create_faccess_plist succeeded");
91 
92     fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
93     VRFY((fid>=0), "H5Fcreate succeeded");
94     ret = H5Pclose(plist);
95     VRFY((ret>=0), "H5Pclose succeeded");
96 
97     dcpl = H5Pcreate(H5P_DATASET_CREATE);
98     VRFY((dcpl>=0), "failed H5Pcreate");
99 
100     /* Set 1 chunk size */
101     chunk_dim = 1;
102     ret = H5Pset_chunk(dcpl, 1, &chunk_dim);
103     VRFY((ret>=0), "failed H5Pset_chunk");
104 
105     /* Create 1D dataspace with 0 dim size */
106     dim = 0;
107     sid = H5Screate_simple(1, &dim, NULL);
108     VRFY((sid>=0), "failed H5Screate_simple");
109 
110     /* Create chunked dataset */
111     dsid = H5Dcreate2(fid, "dset", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
112     VRFY((dsid>=0), "failed H5Dcreate2");
113 
114     /* write 0 elements from dataset */
115     ret = H5Dwrite(dsid, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, data);
116     VRFY((ret>=0), "failed H5Dwrite");
117 
118     /* Read 0 elements from dataset */
119     ret = H5Dread(dsid, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, data);
120     VRFY((ret>=0), "failed H5Dread");
121 
122     H5Pclose(dcpl);
123     H5Dclose(dsid);
124     H5Sclose(sid);
125     H5Fclose(fid);
126 }
127 
128 /*
 * Example of using PHDF5 to create ndatasets datasets.  Each process writes
 * a slab of the array to the file.
131  *
132  * Changes:    Updated function to use a dynamically calculated size,
133  *        instead of the old SIZE #define.  This should allow it
134  *        to function with an arbitrary number of processors.
135  *
136  *                        JRM - 8/11/04
137  */
void multiple_dset_write(void)
139 {
140     int   i, j, n, mpi_size, mpi_rank, size;
141     hid_t iof, plist, dataset, memspace, filespace;
142     hid_t dcpl;                         /* Dataset creation property list */
143     hsize_t chunk_origin [DIM];
144     hsize_t chunk_dims [DIM], file_dims [DIM];
145     hsize_t count[DIM]={1,1};
146     double *outme = NULL;
147     double fill=1.0;                    /* Fill value */
148     char   dname [100];
149     herr_t ret;
150     const  H5Ptest_param_t *pt;
151     char  *filename;
152     int    ndatasets;
153 
154     pt = GetTestParameters();
155     filename = pt->name;
156     ndatasets = pt->count;
157 
158     size = get_size();
159 
160     MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
161     MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
162 
163     outme = HDmalloc((size_t)(size * size * sizeof(double)));
164     VRFY((outme != NULL), "HDmalloc succeeded for outme");
165 
166     plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
167     VRFY((plist>=0), "create_faccess_plist succeeded");
168     iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
169     VRFY((iof>=0), "H5Fcreate succeeded");
170     ret = H5Pclose(plist);
171     VRFY((ret>=0), "H5Pclose succeeded");
172 
173     /* decide the hyperslab according to process number. */
174     get_slab(chunk_origin, chunk_dims, count, file_dims, size);
175 
176     memspace = H5Screate_simple(DIM, chunk_dims, NULL);
177     filespace = H5Screate_simple(DIM, file_dims, NULL);
178     ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims, count, chunk_dims);
179     VRFY((ret>=0), "mdata hyperslab selection");
180 
181     /* Create a dataset creation property list */
182     dcpl = H5Pcreate(H5P_DATASET_CREATE);
183     VRFY((dcpl>=0), "dataset creation property list succeeded");
184 
185     ret = H5Pset_fill_value(dcpl, H5T_NATIVE_DOUBLE, &fill);
186     VRFY((ret>=0), "set fill-value succeeded");
187 
188     for(n = 0; n < ndatasets; n++) {
189     HDsprintf(dname, "dataset %d", n);
190     dataset = H5Dcreate2(iof, dname, H5T_NATIVE_DOUBLE, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
191     VRFY((dataset > 0), dname);
192 
193     /* calculate data to write */
194     for(i = 0; i < size; i++)
195         for(j = 0; j < size; j++)
196             outme [(i * size) + j] = n*1000 + mpi_rank;
197 
198     H5Dwrite(dataset, H5T_NATIVE_DOUBLE, memspace, filespace, H5P_DEFAULT, outme);
199 
200     H5Dclose(dataset);
201 #ifdef BARRIER_CHECKS
202     if(!((n+1) % 10)) {
203         HDprintf("created %d datasets\n", n+1);
204         MPI_Barrier(MPI_COMM_WORLD);
205     }
206 #endif /* BARRIER_CHECKS */
207     }
208 
209     H5Sclose(filespace);
210     H5Sclose(memspace);
211     H5Pclose(dcpl);
212     H5Fclose(iof);
213 
214     HDfree(outme);
215 }
216 
217 
/* Example of using PHDF5 to create, write, and read a compact dataset.
219  *
220  * Changes:    Updated function to use a dynamically calculated size,
221  *        instead of the old SIZE #define.  This should allow it
222  *        to function with an arbitrary number of processors.
223  *
224  *                        JRM - 8/11/04
225  */
void compact_dataset(void)
227 {
228     int     i, j, mpi_size, mpi_rank, size, err_num=0;
229     hid_t   iof, plist, dcpl, dxpl, dataset, filespace;
230     hsize_t file_dims [DIM];
231     double *outme;
232     double *inme;
233     char    dname[]="dataset";
234     herr_t  ret;
235     const char *filename;
236 
237     size = get_size();
238 
239     for(i = 0; i < DIM; i++ )
240         file_dims[i] = size;
241 
242     MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
243     MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
244 
245     outme = HDmalloc((size_t)(size * size * sizeof(double)));
246     VRFY((outme != NULL), "HDmalloc succeeded for outme");
247 
248     inme = HDmalloc((size_t)(size * size * sizeof(double)));
    VRFY((inme != NULL), "HDmalloc succeeded for inme");
250 
251     filename = GetTestParameters();
252     VRFY((mpi_size <= size), "mpi_size <= size");
253 
254     plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
255     iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
256 
257     /* Define data space */
258     filespace = H5Screate_simple(DIM, file_dims, NULL);
259 
260     /* Create a compact dataset */
261     dcpl = H5Pcreate(H5P_DATASET_CREATE);
262     VRFY((dcpl>=0), "dataset creation property list succeeded");
263     ret = H5Pset_layout(dcpl, H5D_COMPACT);
    VRFY((ret >= 0), "set property list for compact dataset");
265     ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY);
266     VRFY((ret >= 0), "set space allocation time for compact dataset");
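    /* (For reference: a compact dataset stores its raw data directly in the
     * dataset's object header rather than in a separate contiguous or chunked
     * block in the file.) */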
267 
268     dataset = H5Dcreate2(iof, dname, H5T_NATIVE_DOUBLE, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
269     VRFY((dataset >= 0), "H5Dcreate2 succeeded");
270 
271     /* set up the collective transfer properties list */
272     dxpl = H5Pcreate(H5P_DATASET_XFER);
273     VRFY((dxpl >= 0), "");
274     ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
275     VRFY((ret >= 0), "H5Pcreate xfer succeeded");
276     if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
277         ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
278         VRFY((ret>= 0),"set independent IO collectively succeeded");
279     }
280 
281 
282     /* Recalculate data to write.  Each process writes the same data. */
283     for(i = 0; i < size; i++)
284          for(j = 0; j < size; j++)
285              outme[(i * size) + j] =(i + j) * 1000;
286 
287     ret = H5Dwrite(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, dxpl, outme);
288     VRFY((ret >= 0), "H5Dwrite succeeded");
289 
290     H5Pclose(dcpl);
291     H5Pclose(plist);
292     H5Dclose(dataset);
293     H5Sclose(filespace);
294     H5Fclose(iof);
295 
296     /* Open the file and dataset, read and compare the data. */
297     plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
298     iof = H5Fopen(filename, H5F_ACC_RDONLY, plist);
299     VRFY((iof >= 0), "H5Fopen succeeded");
300 
301     /* set up the collective transfer properties list */
302     dxpl = H5Pcreate(H5P_DATASET_XFER);
303     VRFY((dxpl >= 0), "");
304     ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
305     VRFY((ret >= 0), "H5Pcreate xfer succeeded");
306     if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
307         ret = H5Pset_dxpl_mpio_collective_opt(dxpl,H5FD_MPIO_INDIVIDUAL_IO);
308         VRFY((ret>= 0),"set independent IO collectively succeeded");
309     }
310 
311     dataset = H5Dopen2(iof, dname, H5P_DEFAULT);
312     VRFY((dataset >= 0), "H5Dopen2 succeeded");
313 
314 #ifdef H5_HAVE_INSTRUMENTED_LIBRARY
315     hbool_t prop_value;
316     prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
317     ret = H5Pinsert2(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, H5D_XFER_COLL_RANK0_BCAST_SIZE, &prop_value,
318                      NULL, NULL, NULL, NULL, NULL, NULL);
319     VRFY((ret >= 0), "H5Pinsert2() succeeded");
320 #endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
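    /* (Note: this test-only transfer property is assumed to let the test
     * observe whether the library applied the "rank 0 reads and broadcasts"
     * optimization during the collective read below; it is checked again
     * after H5Dread.) */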
321 
322     ret = H5Dread(dataset, H5T_NATIVE_DOUBLE, H5S_ALL, H5S_ALL, dxpl, inme);
323     VRFY((ret >= 0), "H5Dread succeeded");
324 
325 #ifdef H5_HAVE_INSTRUMENTED_LIBRARY
326     prop_value = FALSE;
327     ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
328     VRFY((ret >= 0), "H5Pget succeeded");
329     VRFY((prop_value == FALSE && dxfer_coll_type == DXFER_COLLECTIVE_IO),"rank 0 Bcast optimization was performed for a compact dataset");
330 #endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
331 
332     /* Verify data value */
333     for(i = 0; i < size; i++)
334         for(j = 0; j < size; j++)
335             if(inme[(i * size) + j] != outme[(i * size) + j])
336                 if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
337                     HDprintf("Dataset Verify failed at [%d][%d]: expect %f, got %f\n", i, j, outme[(i * size) + j], inme[(i * size) + j]);
338 
339     H5Pclose(plist);
340     H5Pclose(dxpl);
341     H5Dclose(dataset);
342     H5Fclose(iof);
343     HDfree(inme);
344     HDfree(outme);
345 }
346 
347 /*
 * Example of using PHDF5 to create, write, and read a dataset and an
 * attribute with a null dataspace.
350  *
351  * Changes:    Removed the assert that mpi_size <= the SIZE #define.
352  *        As best I can tell, this assert isn't needed here,
353  *        and in any case, the SIZE #define is being removed
354  *        in an update of the functions in this file to run
355  *        with an arbitrary number of processes.
356  *
357  *                                         JRM - 8/24/04
358  */
void null_dataset(void)
360 {
361     int      mpi_size, mpi_rank;
362     hid_t    iof, plist, dxpl, dataset, attr, sid;
363     unsigned uval=2;    /* Buffer for writing to dataset */
364     int      val=1;          /* Buffer for writing to attribute */
365     int      nelem;
366     char     dname[]="dataset";
367     char     attr_name[]="attribute";
368     herr_t   ret;
369     const char *filename;
370 
371     MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
372     MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
373 
374     filename = GetTestParameters();
375 
376     plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
377     iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
378 
379     /* Define data space */
380     sid = H5Screate(H5S_NULL);
381 
382     /* Check that the null dataspace actually has 0 elements */
383     nelem = H5Sget_simple_extent_npoints(sid);
384     VRFY((nelem == 0), "H5Sget_simple_extent_npoints");
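    /* (A null dataspace contains no elements, so the H5Dwrite/H5Awrite and
     * H5Dread/H5Aread calls below transfer no data; the write/read buffers
     * keep their original values, which is verified after reopening the
     * file.) */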
385 
    /* Create a dataset with the null dataspace */
387     dataset = H5Dcreate2(iof, dname, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
388     VRFY((dataset >= 0), "H5Dcreate2 succeeded");
389 
390     /* set up the collective transfer properties list */
391     dxpl = H5Pcreate(H5P_DATASET_XFER);
392     VRFY((dxpl >= 0), "");
393     ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
394     VRFY((ret >= 0), "H5Pcreate xfer succeeded");
395     if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
396         ret = H5Pset_dxpl_mpio_collective_opt(dxpl, H5FD_MPIO_INDIVIDUAL_IO);
397         VRFY((ret>= 0),"set independent IO collectively succeeded");
398     }
399 
400 
401     /* Write "nothing" to the dataset(with type conversion) */
402     ret = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, &uval);
403     VRFY((ret >= 0), "H5Dwrite succeeded");
404 
    /* Create an attribute for the dataset */
406     attr = H5Acreate2(dataset, attr_name, H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT);
407     VRFY((attr >= 0), "H5Acreate2");
408 
409     /* Write "nothing" to the attribute(with type conversion) */
410     ret = H5Awrite(attr, H5T_NATIVE_INT, &val);
411     VRFY((ret >= 0), "H5Awrite");
412 
413     H5Aclose(attr);
414     H5Dclose(dataset);
415     H5Pclose(plist);
416     H5Sclose(sid);
417     H5Fclose(iof);
418 
419     /* Open the file and dataset, read and compare the data. */
420     plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
421     iof = H5Fopen(filename, H5F_ACC_RDONLY, plist);
422     VRFY((iof >= 0), "H5Fopen succeeded");
423 
424     /* set up the collective transfer properties list */
425     dxpl = H5Pcreate(H5P_DATASET_XFER);
426     VRFY((dxpl >= 0), "");
427     ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
428     VRFY((ret >= 0), "H5Pcreate xfer succeeded");
429     if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
430         ret = H5Pset_dxpl_mpio_collective_opt(dxpl,H5FD_MPIO_INDIVIDUAL_IO);
431         VRFY((ret>= 0),"set independent IO collectively succeeded");
432     }
433 
434 
435     dataset = H5Dopen2(iof, dname, H5P_DEFAULT);
436     VRFY((dataset >= 0), "H5Dopen2 succeeded");
437 
438     /* Try reading from the dataset(make certain our buffer is unmodified) */
439     ret = H5Dread(dataset, H5T_NATIVE_UINT, H5S_ALL, H5S_ALL, dxpl, &uval);
440     VRFY((ret>=0), "H5Dread");
441     VRFY((uval==2), "H5Dread");
442 
443     /* Open the attribute for the dataset */
444     attr = H5Aopen(dataset, attr_name, H5P_DEFAULT);
445     VRFY((attr >= 0), "H5Aopen");
446 
    /* Try reading from the attribute(make certain our buffer is unmodified) */
    ret = H5Aread(attr, H5T_NATIVE_INT, &val);
448     VRFY((ret>=0), "H5Aread");
449     VRFY((val==1), "H5Aread");
450 
451     H5Pclose(plist);
452     H5Pclose(dxpl);
453     H5Aclose(attr);
454     H5Dclose(dataset);
455     H5Fclose(iof);
456 }
457 
458 /* Example of using PHDF5 to create "large" datasets. (>2GB, >4GB, >8GB)
459  * Actual data is _not_ written to these datasets.  Dataspaces are exact
460  * sizes(2GB, 4GB, etc.), but the metadata for the file pushes the file over
461  * the boundary of interest.
462  *
463  * Changes:    Removed the assert that mpi_size <= the SIZE #define.
464  *        As best I can tell, this assert isn't needed here,
465  *        and in any case, the SIZE #define is being removed
466  *        in an update of the functions in this file to run
467  *        with an arbitrary number of processes.
468  *
469  *                                         JRM - 8/11/04
470  */
void big_dataset(void)
472 {
473     int mpi_size, mpi_rank;     /* MPI info */
474     hid_t iof,                  /* File ID */
475         fapl,                   /* File access property list ID */
476         dataset,                /* Dataset ID */
477         filespace;              /* Dataset's dataspace ID */
478     hsize_t file_dims [4];      /* Dimensions of dataspace */
479     char dname[]="dataset";     /* Name of dataset */
480     MPI_Offset file_size;       /* Size of file on disk */
481     herr_t ret;                 /* Generic return value */
482     const char *filename;
483 
484     MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
485     MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
486 
487     /* Verify MPI_Offset can handle larger than 2GB sizes */
488     VRFY((sizeof(MPI_Offset) > 4), "sizeof(MPI_Offset)>4");
489 
490     filename = GetTestParameters();
491 
492     fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
493     VRFY((fapl >= 0), "create_faccess_plist succeeded");
494 
495     /*
496      * Create >2GB HDF5 file
497      */
498     iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
499     VRFY((iof >= 0), "H5Fcreate succeeded");
500 
    /* Define dataspace for a 2GB dataset */
502     file_dims[0]= 2;
503     file_dims[1]= 1024;
504     file_dims[2]= 1024;
505     file_dims[3]= 1024;
506     filespace = H5Screate_simple(4, file_dims, NULL);
507     VRFY((filespace >= 0), "H5Screate_simple succeeded");
508 
509     dataset = H5Dcreate2(iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
510     VRFY((dataset >= 0), "H5Dcreate2 succeeded");
511 
512     /* Close all file objects */
513     ret = H5Dclose(dataset);
514     VRFY((ret >= 0), "H5Dclose succeeded");
515     ret = H5Sclose(filespace);
516     VRFY((ret >= 0), "H5Sclose succeeded");
517     ret = H5Fclose(iof);
518     VRFY((ret >= 0), "H5Fclose succeeded");
519 
520     /* Check that file of the correct size was created */
521     file_size = h5_get_file_size(filename, fapl);
522     VRFY((file_size == 2147485696ULL), "File is correct size(~2GB)");
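    /* (2147485696 bytes is exactly 2 GiB of raw dataset data plus 2048 bytes
     * of file metadata overhead; the ~4 GiB and ~8 GiB checks below expect
     * the same 2048-byte overhead.) */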
523 
524     /*
525      * Create >4GB HDF5 file
526      */
527     iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
528     VRFY((iof >= 0), "H5Fcreate succeeded");
529 
    /* Define dataspace for a 4GB dataset */
531     file_dims[0]= 4;
532     file_dims[1]= 1024;
533     file_dims[2]= 1024;
534     file_dims[3]= 1024;
535     filespace = H5Screate_simple(4, file_dims, NULL);
536     VRFY((filespace >= 0), "H5Screate_simple succeeded");
537 
538     dataset = H5Dcreate2(iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
539     VRFY((dataset >= 0), "H5Dcreate2 succeeded");
540 
541     /* Close all file objects */
542     ret = H5Dclose(dataset);
543     VRFY((ret >= 0), "H5Dclose succeeded");
544     ret = H5Sclose(filespace);
545     VRFY((ret >= 0), "H5Sclose succeeded");
546     ret = H5Fclose(iof);
547     VRFY((ret >= 0), "H5Fclose succeeded");
548 
549     /* Check that file of the correct size was created */
550     file_size = h5_get_file_size(filename, fapl);
551     VRFY((file_size == 4294969344ULL), "File is correct size(~4GB)");
552 
553     /*
554      * Create >8GB HDF5 file
555      */
556     iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
557     VRFY((iof >= 0), "H5Fcreate succeeded");
558 
    /* Define dataspace for an 8GB dataset */
560     file_dims[0]= 8;
561     file_dims[1]= 1024;
562     file_dims[2]= 1024;
563     file_dims[3]= 1024;
564     filespace = H5Screate_simple(4, file_dims, NULL);
565     VRFY((filespace >= 0), "H5Screate_simple succeeded");
566 
567     dataset = H5Dcreate2(iof, dname, H5T_NATIVE_UCHAR, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
568     VRFY((dataset >= 0), "H5Dcreate2 succeeded");
569 
570     /* Close all file objects */
571     ret = H5Dclose(dataset);
572     VRFY((ret >= 0), "H5Dclose succeeded");
573     ret = H5Sclose(filespace);
574     VRFY((ret >= 0), "H5Sclose succeeded");
575     ret = H5Fclose(iof);
576     VRFY((ret >= 0), "H5Fclose succeeded");
577 
578     /* Check that file of the correct size was created */
579     file_size = h5_get_file_size(filename, fapl);
580     VRFY((file_size == 8589936640ULL), "File is correct size(~8GB)");
581 
582     /* Close fapl */
583     ret = H5Pclose(fapl);
584     VRFY((ret >= 0), "H5Pclose succeeded");
585 }
586 
/* Example of using PHDF5 to read a partially written dataset.  The dataset
 * does not have actual data written to the entire raw data area and relies on
 * the default fill value of zeros to work correctly.
590  *
591  * Changes:    Removed the assert that mpi_size <= the SIZE #define.
592  *        As best I can tell, this assert isn't needed here,
593  *        and in any case, the SIZE #define is being removed
594  *        in an update of the functions in this file to run
595  *        with an arbitrary number of processes.
596  *
597  *        Also added code to free dynamically allocated buffers.
598  *
599  *                                         JRM - 8/11/04
600  */
void dataset_fillvalue(void)
602 {
603     int mpi_size, mpi_rank;     /* MPI info */
604     int err_num;                /* Number of errors */
605     hid_t iof,                  /* File ID */
606         fapl,                   /* File access property list ID */
607         dxpl,                   /* Data transfer property list ID */
608         dataset,                /* Dataset ID */
609         memspace,               /* Memory dataspace ID */
610         filespace;              /* Dataset's dataspace ID */
611     char dname[]="dataset";     /* Name of dataset */
612     hsize_t     dset_dims[4] = {0, 6, 7, 8};
613     hsize_t     req_start[4] = {0, 0, 0, 0};
614     hsize_t     req_count[4] = {1, 6, 7, 8};
615     hsize_t     dset_size;      /* Dataset size */
616     int *rdata, *wdata;         /* Buffers for data to read and write */
617     int *twdata, *trdata;       /* Temporary pointer into buffer */
618     int acc, i, ii, j, k, l;    /* Local index variables */
619     herr_t ret;                 /* Generic return value */
620     const char *filename;
621 
622     MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
623     MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
624 
625     filename = GetTestParameters();
626 
627     /* Set the dataset dimension to be one row more than number of processes */
628     /* and calculate the actual dataset size. */
629     dset_dims[0]=mpi_size+1;
630     dset_size=dset_dims[0]*dset_dims[1]*dset_dims[2]*dset_dims[3];
631 
632     /* Allocate space for the buffers */
633     rdata=HDmalloc((size_t)(dset_size*sizeof(int)));
    VRFY((rdata != NULL), "HDmalloc succeeded for read buffer");
635     wdata=HDmalloc((size_t)(dset_size*sizeof(int)));
636     VRFY((wdata != NULL), "HDmalloc succeeded for write buffer");
637 
638     fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
639     VRFY((fapl >= 0), "create_faccess_plist succeeded");
640 
641     /*
642      * Create HDF5 file
643      */
644     iof = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
645     VRFY((iof >= 0), "H5Fcreate succeeded");
646 
647     filespace = H5Screate_simple(4, dset_dims, NULL);
648     VRFY((filespace >= 0), "File H5Screate_simple succeeded");
649 
650     dataset = H5Dcreate2(iof, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
651     VRFY((dataset >= 0), "H5Dcreate2 succeeded");
652 
653     memspace = H5Screate_simple(4, dset_dims, NULL);
654     VRFY((memspace >= 0), "Memory H5Screate_simple succeeded");
655 
656     /*
657      * Read dataset before any data is written.
658      */
659 
660     /* Create DXPL for I/O */
661     dxpl = H5Pcreate(H5P_DATASET_XFER);
662     VRFY((dxpl >= 0), "H5Pcreate succeeded");
663 
664 #ifdef H5_HAVE_INSTRUMENTED_LIBRARY
665       hbool_t prop_value;
666       prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
667       ret = H5Pinsert2(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, H5D_XFER_COLL_RANK0_BCAST_SIZE, &prop_value,
668                    NULL, NULL, NULL, NULL, NULL, NULL);
669       VRFY((ret >= 0),"testing property list inserted succeeded");
670 #endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
671 
672     for(ii = 0; ii < 2; ii++) {
673 
674       if(ii == 0)
675         ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT);
676       else
677         ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
678       VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
679 
680       /* set entire read buffer with the constant 2 */
681       HDmemset(rdata,2,(size_t)(dset_size*sizeof(int)));
682 
683       /* Read the entire dataset back */
684       ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, rdata);
685       VRFY((ret >= 0), "H5Dread succeeded");
686 
687 #ifdef H5_HAVE_INSTRUMENTED_LIBRARY
688       prop_value = FALSE;
689       ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
690       VRFY((ret >= 0), "testing property list get succeeded");
691       if(ii == 0)
692         VRFY((prop_value == FALSE), "correctly handled rank 0 Bcast");
693       else
694         VRFY((prop_value == TRUE), "correctly handled rank 0 Bcast");
695 #endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
696 
697       /* Verify all data read are the fill value 0 */
698       trdata = rdata;
699       err_num = 0;
700       for(i = 0; i < (int)dset_dims[0]; i++)
701         for(j = 0; j < (int)dset_dims[1]; j++)
702           for(k = 0; k < (int)dset_dims[2]; k++)
            for(l = 0; l < (int)dset_dims[3]; l++, trdata++)
704               if(*trdata != 0)
705                 if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
706                   HDprintf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", i, j, k, l, *trdata);
707       if(err_num > MAX_ERR_REPORT && !VERBOSE_MED)
708         HDprintf("[more errors ...]\n");
709       if(err_num) {
710         HDprintf("%d errors found in check_value\n", err_num);
711         nerrors++;
712       }
713     }
714 
715     /* Barrier to ensure all processes have completed the above test. */
716     MPI_Barrier(MPI_COMM_WORLD);
717 
718     /*
719      * Each process writes 1 row of data. Thus last row is not written.
720      */
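    /* (The dataset has mpi_size + 1 rows along dimension 0 and each rank
     * writes only row mpi_rank, so the last row keeps the default fill value
     * of 0; the verification loop below checks for exactly that.) */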
721     /* Create hyperslabs in memory and file dataspaces */
722     req_start[0]=mpi_rank;
723     ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, req_start, NULL, req_count, NULL);
724     VRFY((ret >= 0), "H5Sselect_hyperslab succeeded on memory dataspace");
725     ret = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, req_start, NULL, req_count, NULL);
726     VRFY((ret >= 0), "H5Sselect_hyperslab succeeded on memory dataspace");
727 
728     ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
729     VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
730     if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
731      ret = H5Pset_dxpl_mpio_collective_opt(dxpl,H5FD_MPIO_INDIVIDUAL_IO);
732      VRFY((ret>= 0),"set independent IO collectively succeeded");
733     }
734 
735 
736     /* Fill write buffer with some values */
737     twdata=wdata;
738     for(i=0, acc=0; i<(int)dset_dims[0]; i++)
739         for(j=0; j<(int)dset_dims[1]; j++)
740             for(k=0; k<(int)dset_dims[2]; k++)
741                 for(l=0; l<(int)dset_dims[3]; l++)
742                     *twdata++ = acc++;
743 
744     /* Collectively write a hyperslab of data to the dataset */
745     ret = H5Dwrite(dataset, H5T_NATIVE_INT, memspace, filespace, dxpl, wdata);
746     VRFY((ret >= 0), "H5Dwrite succeeded");
747 
748     /* Barrier here, to allow processes to sync */
749     MPI_Barrier(MPI_COMM_WORLD);
750 
751     /*
752      * Read dataset after partial write.
753      */
754 
755 #ifdef H5_HAVE_INSTRUMENTED_LIBRARY
756       prop_value = H5D_XFER_COLL_RANK0_BCAST_DEF;
757       ret = H5Pset(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
758       VRFY((ret >= 0), " H5Pset succeeded");
759 #endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
760 
761     for(ii = 0; ii < 2; ii++) {
762 
763       if(ii == 0)
764         ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_INDEPENDENT);
765       else
766         ret = H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
767       VRFY((ret >= 0), "H5Pset_dxpl_mpio succeeded");
768 
769       /* set entire read buffer with the constant 2 */
770       HDmemset(rdata,2,(size_t)(dset_size*sizeof(int)));
771 
772       /* Read the entire dataset back */
773       ret = H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, rdata);
774       VRFY((ret >= 0), "H5Dread succeeded");
775 
776 #ifdef H5_HAVE_INSTRUMENTED_LIBRARY
777       prop_value = FALSE;
778       ret = H5Pget(dxpl, H5D_XFER_COLL_RANK0_BCAST_NAME, &prop_value);
779       VRFY((ret >= 0), "testing property list get succeeded");
780       if(ii == 0)
781         VRFY((prop_value == FALSE), "correctly handled rank 0 Bcast");
782       else
783         VRFY((prop_value == TRUE), "correctly handled rank 0 Bcast");
784 #endif /* H5_HAVE_INSTRUMENTED_LIBRARY */
785 
786       /* Verify correct data read */
787       twdata=wdata;
788       trdata=rdata;
789       err_num=0;
790       for(i=0; i<(int)dset_dims[0]; i++)
791         for(j=0; j<(int)dset_dims[1]; j++)
792           for(k=0; k<(int)dset_dims[2]; k++)
793             for(l=0; l<(int)dset_dims[3]; l++, twdata++, trdata++)
794               if(i<mpi_size) {
795                 if(*twdata != *trdata )
796                   if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
797                     HDprintf("Dataset Verify failed at [%d][%d][%d][%d]: expect %d, got %d\n", i,j,k,l, *twdata, *trdata);
798               } /* end if */
799               else {
800                 if(*trdata != 0)
801                   if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
802                     HDprintf("Dataset Verify failed at [%d][%d][%d][%d]: expect 0, got %d\n", i,j,k,l, *trdata);
803               } /* end else */
804       if(err_num > MAX_ERR_REPORT && !VERBOSE_MED)
805         HDprintf("[more errors ...]\n");
806       if(err_num){
807         HDprintf("%d errors found in check_value\n", err_num);
808         nerrors++;
809       }
810     }
811 
812     /* Close all file objects */
813     ret = H5Dclose(dataset);
814     VRFY((ret >= 0), "H5Dclose succeeded");
815     ret = H5Sclose(filespace);
816     VRFY((ret >= 0), "H5Sclose succeeded");
817     ret = H5Fclose(iof);
818     VRFY((ret >= 0), "H5Fclose succeeded");
819 
820     /* Close memory dataspace */
821     ret = H5Sclose(memspace);
822     VRFY((ret >= 0), "H5Sclose succeeded");
823 
824     /* Close dxpl */
825     ret = H5Pclose(dxpl);
826     VRFY((ret >= 0), "H5Pclose succeeded");
827 
828     /* Close fapl */
829     ret = H5Pclose(fapl);
830     VRFY((ret >= 0), "H5Pclose succeeded");
831 
832     /* free the buffers */
833     HDfree(rdata);
834     HDfree(wdata);
835 }
836 
837 /* combined cngrpw and ingrpr tests because ingrpr reads file created by cngrpw. */
void collective_group_write_independent_group_read(void)
839 {
840     collective_group_write();
841     independent_group_read();
842 }
843 
844 /* Write multiple groups with a chunked dataset in each group collectively.
845  * These groups and datasets are for testing independent read later.
846  *
847  * Changes:     Updated function to use a dynamically calculated size,
848  *              instead of the old SIZE #define.  This should allow it
849  *              to function with an arbitrary number of processors.
850  *
851  *                                              JRM - 8/16/04
852  */
void collective_group_write(void)
854 {
855     int   mpi_rank, mpi_size, size;
856     int   i, j, m;
857     char  gname[64], dname[32];
858     hid_t fid, gid, did, plist, dcpl, memspace, filespace;
859     DATATYPE *outme = NULL;
860     hsize_t   chunk_origin[DIM];
861     hsize_t   chunk_dims[DIM], file_dims[DIM], count[DIM];
862     hsize_t   chunk_size[2];  /* Chunk dimensions - computed shortly */
863     herr_t    ret1, ret2;
864     const H5Ptest_param_t *pt;
865     char     *filename;
866     int       ngroups;
867 
868     pt = GetTestParameters();
869     filename = pt->name;
870     ngroups = pt->count;
871 
872     MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
873     MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
874 
875     size = get_size();
876 
877     chunk_size[0] =(hsize_t)(size / 2);
878     chunk_size[1] =(hsize_t)(size / 2);
879 
880     outme = HDmalloc((size_t)(size * size * sizeof(DATATYPE)));
881     VRFY((outme != NULL), "HDmalloc succeeded for outme");
882 
883     plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
884     fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
885     H5Pclose(plist);
886 
887     /* decide the hyperslab according to process number. */
888     get_slab(chunk_origin, chunk_dims, count, file_dims, size);
889 
890     /* select hyperslab in memory and file spaces.  These two operations are
891      * identical since the datasets are the same. */
892     memspace  = H5Screate_simple(DIM, file_dims, NULL);
893     ret1 = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin,
894                                chunk_dims, count, chunk_dims);
895     filespace = H5Screate_simple(DIM, file_dims,  NULL);
896     ret2 = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin,
897                                chunk_dims, count, chunk_dims);
898     VRFY((memspace>=0), "memspace");
899     VRFY((filespace>=0), "filespace");
900     VRFY((ret1>=0), "mgroup memspace selection");
901     VRFY((ret2>=0), "mgroup filespace selection");
902 
903     dcpl = H5Pcreate(H5P_DATASET_CREATE);
904     ret1 = H5Pset_chunk(dcpl, 2, chunk_size);
905     VRFY((dcpl>=0), "dataset creation property");
906     VRFY((ret1>=0), "set chunk for dataset creation property");
907 
908     /* creates ngroups groups under the root group, writes chunked
909      * datasets in parallel. */
910     for(m = 0; m < ngroups; m++) {
911         HDsprintf(gname, "group%d", m);
912         gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
913         VRFY((gid > 0), gname);
914 
915         HDsprintf(dname, "dataset%d", m);
916         did = H5Dcreate2(gid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
917         VRFY((did > 0), dname);
918 
919         for(i = 0; i < size; i++)
920             for(j = 0; j < size; j++)
921                 outme[(i * size) + j] =(i + j) * 1000 + mpi_rank;
922 
923         H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme);
924 
925         H5Dclose(did);
926         H5Gclose(gid);
927 
928 #ifdef BARRIER_CHECKS
929         if(!((m+1) % 10)) {
930             HDprintf("created %d groups\n", m+1);
931             MPI_Barrier(MPI_COMM_WORLD);
932         }
933 #endif /* BARRIER_CHECKS */
934     }
935 
936     H5Pclose(dcpl);
937     H5Sclose(filespace);
938     H5Sclose(memspace);
939     H5Fclose(fid);
940 
941     HDfree(outme);
942 }
943 
944 /* Let two sets of processes open and read different groups and chunked
945  * datasets independently.
946  */
void independent_group_read(void)
948 {
949     int      mpi_rank, m;
950     hid_t    plist, fid;
951     const H5Ptest_param_t *pt;
952     char    *filename;
953     int      ngroups;
954 
955     pt = GetTestParameters();
956     filename = pt->name;
957     ngroups = pt->count;
958 
959     MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
960 
961     plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
962     H5Pset_all_coll_metadata_ops(plist, FALSE);
963 
964     fid = H5Fopen(filename, H5F_ACC_RDONLY, plist);
965     H5Pclose(plist);
966 
967     /* open groups and read datasets. Odd number processes read even number
968      * groups from the end; even number processes read odd number groups
969      * from the beginning. */
970     if(mpi_rank%2==0) {
        for(m=ngroups-1; m>=0; m-=2)
972             group_dataset_read(fid, mpi_rank, m);
973     } else {
974         for(m=0; m<ngroups; m+=2)
975             group_dataset_read(fid, mpi_rank, m);
976     }
977 
978     H5Fclose(fid);
979 }
980 
981 /* Open and read datasets and compare data
982  *
983  * Changes:     Updated function to use a dynamically calculated size,
984  *              instead of the old SIZE #define.  This should allow it
985  *              to function with an arbitrary number of processors.
986  *
987  *        Also added code to verify the results of dynamic memory
 *        allocations, and to free dynamically allocated memory
989  *        when we are done with it.
990  *
991  *                                              JRM - 8/16/04
992  */
993 static void
group_dataset_read(hid_t fid, int mpi_rank, int m)
995 {
996     int      ret, i, j, size;
997     char     gname[64], dname[32];
998     hid_t    gid, did;
999     DATATYPE *outdata = NULL;
1000     DATATYPE *indata = NULL;
1001 
1002     size = get_size();
1003 
1004     indata =(DATATYPE*)HDmalloc((size_t)(size * size * sizeof(DATATYPE)));
1005     VRFY((indata != NULL), "HDmalloc succeeded for indata");
1006 
1007     outdata =(DATATYPE*)HDmalloc((size_t)(size * size * sizeof(DATATYPE)));
1008     VRFY((outdata != NULL), "HDmalloc succeeded for outdata");
1009 
1010     /* open every group under root group. */
1011     HDsprintf(gname, "group%d", m);
1012     gid = H5Gopen2(fid, gname, H5P_DEFAULT);
1013     VRFY((gid > 0), gname);
1014 
1015     /* check the data. */
1016     HDsprintf(dname, "dataset%d", m);
1017     did = H5Dopen2(gid, dname, H5P_DEFAULT);
1018     VRFY((did>0), dname);
1019 
1020     H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, indata);
1021 
1022     /* this is the original value */
1023     for(i=0; i<size; i++)
1024        for(j=0; j<size; j++) {
1025            outdata[(i * size) + j] =(i+j)*1000 + mpi_rank;
1026        }
1027 
1028     /* compare the original value(outdata) to the value in file(indata).*/
1029     ret = check_value(indata, outdata, size);
1030     VRFY((ret==0), "check the data");
1031 
1032     H5Dclose(did);
1033     H5Gclose(gid);
1034 
1035     HDfree(indata);
1036     HDfree(outdata);
1037 }
1038 
1039 /*
1040  * Example of using PHDF5 to create multiple groups.  Under the root group,
1041  * it creates ngroups groups.  Under the first group just created, it creates
1042  * recursive subgroups of depth GROUP_DEPTH.  In each created group, it
 * generates NDATASET datasets.  Each process writes a hyperslab of an array
 * into the file.  The structure is like
1045  *
1046  *                             root group
1047  *                                 |
1048  *            ---------------------------- ... ... ------------------------
1049  *           |          |         |        ... ...  |                      |
1050  *       group0*+'   group1*+' group2*+'   ... ...             group ngroups*+'
1051  *           |
1052  *      1st_child_group*'
1053  *           |
1054  *      2nd_child_group*'
1055  *           |
1056  *           :
1057  *           :
1058  *           |
1059  * GROUP_DEPTHth_child_group*'
1060  *
1061  *      * means the group has dataset(s).
1062  *      + means the group has attribute(s).
1063  *      ' means the datasets in the groups have attribute(s).
1064  *
1065  * Changes:     Updated function to use a dynamically calculated size,
1066  *              instead of the old SIZE #define.  This should allow it
1067  *              to function with an arbitrary number of processors.
1068  *
1069  *                                              JRM - 8/16/04
1070  */
void multiple_group_write(void)
1072 {
1073     int     mpi_rank, mpi_size, size;
1074     int     m;
1075     char    gname[64];
1076     hid_t   fid, gid, plist, memspace, filespace;
1077     hsize_t chunk_origin[DIM];
1078     hsize_t chunk_dims[DIM], file_dims[DIM], count[DIM];
1079     herr_t  ret;
1080     const H5Ptest_param_t *pt;
1081     char   *filename;
1082     int     ngroups;
1083 
1084     pt = GetTestParameters();
1085     filename = pt->name;
1086     ngroups = pt->count;
1087 
1088     MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
1089     MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
1090 
1091     size = get_size();
1092 
1093     plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
1094     fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist);
1095     H5Pclose(plist);
1096 
1097     /* decide the hyperslab according to process number. */
1098     get_slab(chunk_origin, chunk_dims, count, file_dims, size);
1099 
1100     /* select hyperslab in memory and file spaces.  These two operations are
1101      * identical since the datasets are the same. */
1102     memspace  = H5Screate_simple(DIM, file_dims, NULL);
1103     VRFY((memspace>=0), "memspace");
1104     ret = H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin,
1105                                chunk_dims, count, chunk_dims);
1106     VRFY((ret>=0), "mgroup memspace selection");
1107 
1108     filespace = H5Screate_simple(DIM, file_dims,  NULL);
1109     VRFY((filespace>=0), "filespace");
1110     ret = H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin,
1111                                chunk_dims, count, chunk_dims);
1112     VRFY((ret>=0), "mgroup filespace selection");
1113 
1114     /* creates ngroups groups under the root group, writes datasets in
1115      * parallel. */
1116     for(m = 0; m < ngroups; m++) {
1117         HDsprintf(gname, "group%d", m);
1118         gid = H5Gcreate2(fid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
1119         VRFY((gid > 0), gname);
1120 
1121         /* create attribute for these groups. */
1122     write_attribute(gid, is_group, m);
1123 
1124         if(m != 0)
1125         write_dataset(memspace, filespace, gid);
1126 
1127         H5Gclose(gid);
1128 
1129 #ifdef BARRIER_CHECKS
1130         if(!((m+1) % 10)) {
1131             HDprintf("created %d groups\n", m+1);
1132             MPI_Barrier(MPI_COMM_WORLD);
1133     }
1134 #endif /* BARRIER_CHECKS */
1135     }
1136 
1137     /* recursively creates subgroups under the first group. */
1138     gid = H5Gopen2(fid, "group0", H5P_DEFAULT);
1139     create_group_recursive(memspace, filespace, gid, 0);
1140     ret = H5Gclose(gid);
1141     VRFY((ret>=0), "H5Gclose");
1142 
1143     ret = H5Sclose(filespace);
1144     VRFY((ret>=0), "H5Sclose");
1145     ret = H5Sclose(memspace);
1146     VRFY((ret>=0), "H5Sclose");
1147     ret = H5Fclose(fid);
1148     VRFY((ret>=0), "H5Fclose");
1149 }
1150 
1151 /*
1152  * In a group, creates NDATASETS datasets.  Each process writes a hyperslab
1153  * of a data array to the file.
1154  *
1155  * Changes:     Updated function to use a dynamically calculated size,
1156  *              instead of the old SIZE #define.  This should allow it
1157  *              to function with an arbitrary number of processors.
1158  *
1159  *                                              JRM - 8/16/04
1160  */
1161 static void
write_dataset(hid_t memspace, hid_t filespace, hid_t gid)
1163 {
1164     int       i, j, n, size;
1165     int       mpi_rank, mpi_size;
1166     char      dname[32];
1167     DATATYPE *outme = NULL;
1168     hid_t     did;
1169 
1170     MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
1171     MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
1172 
1173     size = get_size();
1174 
    outme = HDmalloc((size_t)(size * size * sizeof(DATATYPE)));
1176     VRFY((outme != NULL), "HDmalloc succeeded for outme");
1177 
1178     for(n = 0; n < NDATASET; n++) {
1179          HDsprintf(dname, "dataset%d", n);
1180          did = H5Dcreate2(gid, dname, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
1181          VRFY((did > 0), dname);
1182 
1183          for(i = 0; i < size; i++)
1184              for(j = 0; j < size; j++)
1185                 outme[(i * size) + j] = n * 1000 + mpi_rank;
1186 
1187          H5Dwrite(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, outme);
1188 
1189          /* create attribute for these datasets.*/
1190          write_attribute(did, is_dset, n);
1191 
1192          H5Dclose(did);
1193     }
1194     HDfree(outme);
1195 }
1196 
1197 /*
1198  * Creates subgroups of depth GROUP_DEPTH recursively.  Also writes datasets
1199  * in parallel in each group.
1200  */
1201 static void
create_group_recursive(hid_t memspace, hid_t filespace, hid_t gid, int counter)
1203 {
1204    hid_t child_gid;
1205    int   mpi_rank;
1206    char  gname[64];
1207 
1208    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
1209 
1210 #ifdef BARRIER_CHECKS
1211    if(!((counter+1) % 10)) {
1212         HDprintf("created %dth child groups\n", counter+1);
1213         MPI_Barrier(MPI_COMM_WORLD);
1214    }
1215 #endif /* BARRIER_CHECKS */
1216 
1217    HDsprintf(gname, "%dth_child_group", counter+1);
1218    child_gid = H5Gcreate2(gid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
1219    VRFY((child_gid > 0), gname);
1220 
1221    /* write datasets in parallel. */
1222    write_dataset(memspace, filespace, gid);
1223 
1224    if(counter < GROUP_DEPTH )
1225        create_group_recursive(memspace, filespace, child_gid, counter+1);
1226 
1227    H5Gclose(child_gid);
1228 }
1229 
1230 /*
1231  * This function is to verify the data from multiple group testing.  It opens
1232  * every dataset in every group and check their correctness.
1233  *
1234  * Changes:     Updated function to use a dynamically calculated size,
1235  *              instead of the old SIZE #define.  This should allow it
1236  *              to function with an arbitrary number of processors.
1237  *
1238  *                                              JRM - 8/11/04
1239  */
void multiple_group_read(void)
1241 {
1242     int      mpi_rank, mpi_size, error_num, size;
1243     int      m;
1244     char     gname[64];
1245     hid_t    plist, fid, gid, memspace, filespace;
1246     hsize_t  chunk_origin[DIM];
1247     hsize_t  chunk_dims[DIM], file_dims[DIM], count[DIM];
1248     const H5Ptest_param_t *pt;
1249     char    *filename;
1250     int      ngroups;
1251 
1252     pt = GetTestParameters();
1253     filename = pt->name;
1254     ngroups = pt->count;
1255 
1256     MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
1257     MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
1258 
1259     size = get_size();
1260 
1261     plist = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
1262     fid = H5Fopen(filename, H5F_ACC_RDONLY, plist);
1263     H5Pclose(plist);
1264 
1265     /* decide hyperslab for each process */
1266     get_slab(chunk_origin, chunk_dims, count, file_dims, size);
1267 
1268     /* select hyperslab for memory and file space */
1269     memspace  = H5Screate_simple(DIM, file_dims, NULL);
1270     H5Sselect_hyperslab(memspace, H5S_SELECT_SET, chunk_origin, chunk_dims,
1271                         count, chunk_dims);
1272     filespace = H5Screate_simple(DIM, file_dims, NULL);
1273     H5Sselect_hyperslab(filespace, H5S_SELECT_SET, chunk_origin, chunk_dims,
1274                         count, chunk_dims);
1275 
1276     /* open every group under root group. */
1277     for(m=0; m<ngroups; m++) {
1278         HDsprintf(gname, "group%d", m);
1279         gid = H5Gopen2(fid, gname, H5P_DEFAULT);
1280         VRFY((gid > 0), gname);
1281 
1282         /* check the data. */
1283         if(m != 0)
1284             if((error_num = read_dataset(memspace, filespace, gid))>0)
1285             nerrors += error_num;
1286 
1287         /* check attribute.*/
1288         error_num = 0;
1289         if((error_num = read_attribute(gid, is_group, m))>0 )
1290         nerrors += error_num;
1291 
1292         H5Gclose(gid);
1293 
1294 #ifdef BARRIER_CHECKS
1295         if(!((m+1)%10))
1296             MPI_Barrier(MPI_COMM_WORLD);
1297 #endif /* BARRIER_CHECKS */
1298     }
1299 
1300     /* open all the groups in vertical direction. */
1301     gid = H5Gopen2(fid, "group0", H5P_DEFAULT);
1302     VRFY((gid>0), "group0");
1303     recursive_read_group(memspace, filespace, gid, 0);
1304     H5Gclose(gid);
1305 
1306     H5Sclose(filespace);
1307     H5Sclose(memspace);
1308     H5Fclose(fid);
1309 
1310 }
1311 
1312 /*
 * This function opens all the datasets in a certain group and checks the
 * data with the check_value function.
1315  *
1316  * Changes:     Updated function to use a dynamically calculated size,
1317  *              instead of the old SIZE #define.  This should allow it
1318  *              to function with an arbitrary number of processors.
1319  *
1320  *                                              JRM - 8/11/04
1321  */
1322 static int
read_dataset(hid_t memspace, hid_t filespace, hid_t gid)
1324 {
1325     int       i, j, n, mpi_rank, mpi_size, size, attr_errors=0, vrfy_errors=0;
1326     char      dname[32];
1327     DATATYPE *outdata = NULL, *indata = NULL;
1328     hid_t     did;
1329 
1330     MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
1331     MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
1332 
1333     size = get_size();
1334 
1335     indata =(DATATYPE*)HDmalloc((size_t)(size * size * sizeof(DATATYPE)));
1336     VRFY((indata != NULL), "HDmalloc succeeded for indata");
1337 
1338     outdata =(DATATYPE*)HDmalloc((size_t)(size * size * sizeof(DATATYPE)));
1339     VRFY((outdata != NULL), "HDmalloc succeeded for outdata");
1340 
1341     for(n=0; n<NDATASET; n++) {
1342         HDsprintf(dname, "dataset%d", n);
1343         did = H5Dopen2(gid, dname, H5P_DEFAULT);
1344         VRFY((did>0), dname);
1345 
1346         H5Dread(did, H5T_NATIVE_INT, memspace, filespace, H5P_DEFAULT, indata);
1347 
1348         /* this is the original value */
1349         for(i=0; i<size; i++)
1350         for(j=0; j<size; j++) {
1351             *outdata = n*1000 + mpi_rank;
1352                  outdata++;
1353         }
1354         outdata -= size * size;
1355 
1356         /* compare the original value(outdata) to the value in file(indata).*/
1357         vrfy_errors = check_value(indata, outdata, size);
1358 
1359         /* check attribute.*/
1360         if((attr_errors = read_attribute(did, is_dset, n))>0 )
1361             vrfy_errors += attr_errors;
1362 
1363         H5Dclose(did);
1364     }
1365 
1366     HDfree(indata);
1367     HDfree(outdata);
1368 
1369     return vrfy_errors;
1370 }
1371 
1372 /*
1373  * This recursive function opens all the groups in vertical direction and
1374  * checks the data.
1375  */
1376 static void
recursive_read_group(hid_t memspace, hid_t filespace, hid_t gid, int counter)
1378 {
1379     hid_t child_gid;
1380     int   mpi_rank, err_num=0;
1381     char  gname[64];
1382 
1383     MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
1384 #ifdef BARRIER_CHECKS
1385     if((counter+1) % 10)
1386         MPI_Barrier(MPI_COMM_WORLD);
1387 #endif /* BARRIER_CHECKS */
1388 
1389     if((err_num = read_dataset(memspace, filespace, gid)) )
1390         nerrors += err_num;
1391 
1392     if(counter < GROUP_DEPTH ) {
1393         HDsprintf(gname, "%dth_child_group", counter+1);
1394         child_gid = H5Gopen2(gid, gname, H5P_DEFAULT);
1395         VRFY((child_gid>0), gname);
1396         recursive_read_group(memspace, filespace, child_gid, counter+1);
1397         H5Gclose(child_gid);
1398     }
1399 }
1400 
1401 /* Create and write attribute for a group or a dataset.  For groups, attribute
1402  * is a scalar datum; for dataset, it is a one-dimensional array.
1403  */
1404 static void
write_attribute(hid_t obj_id, int this_type, int num)
1406 {
1407     hid_t   sid, aid;
1408     hsize_t dspace_dims[1]={8};
1409     int     i, mpi_rank, attr_data[8], dspace_rank=1;
1410     char    attr_name[32];
1411 
1412     MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
1413 
1414     if(this_type == is_group) {
1415         HDsprintf(attr_name, "Group Attribute %d", num);
1416         sid = H5Screate(H5S_SCALAR);
1417         aid = H5Acreate2(obj_id, attr_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT);
1418         H5Awrite(aid, H5T_NATIVE_INT,  &num);
1419         H5Aclose(aid);
1420         H5Sclose(sid);
1421     } /* end if */
1422     else if(this_type == is_dset) {
1423         HDsprintf(attr_name, "Dataset Attribute %d", num);
1424         for(i=0; i<8; i++)
1425             attr_data[i] = i;
1426         sid = H5Screate_simple(dspace_rank, dspace_dims, NULL);
1427         aid = H5Acreate2(obj_id, attr_name, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT);
1428         H5Awrite(aid, H5T_NATIVE_INT, attr_data);
1429         H5Aclose(aid);
1430         H5Sclose(sid);
1431     } /* end else-if */
1432 
1433 }
1434 
1435 /* Read and verify attribute for group or dataset. */
1436 static int
read_attribute(hid_t obj_id, int this_type, int num)
1438 {
1439     hid_t aid;
1440     hsize_t group_block[2]={1,1}, dset_block[2]={1, 8};
1441     int  i, mpi_rank, in_num, in_data[8], out_data[8], vrfy_errors = 0;
1442     char attr_name[32];
1443 
1444     MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
1445 
1446     if(this_type == is_group) {
1447         HDsprintf(attr_name, "Group Attribute %d", num);
1448         aid = H5Aopen(obj_id, attr_name, H5P_DEFAULT);
1449         if(MAINPROCESS) {
1450             H5Aread(aid, H5T_NATIVE_INT, &in_num);
1451             vrfy_errors =  dataset_vrfy(NULL, NULL, NULL, group_block, &in_num, &num);
1452         }
1453         H5Aclose(aid);
1454     }
1455     else if(this_type == is_dset) {
1456         HDsprintf(attr_name, "Dataset Attribute %d", num);
1457         for(i=0; i<8; i++)
1458             out_data[i] = i;
1459         aid = H5Aopen(obj_id, attr_name, H5P_DEFAULT);
1460         if(MAINPROCESS) {
1461             H5Aread(aid, H5T_NATIVE_INT, in_data);
1462             vrfy_errors = dataset_vrfy(NULL, NULL, NULL, dset_block, in_data, out_data);
1463         }
1464         H5Aclose(aid);
1465     }
1466 
1467     return vrfy_errors;
1468 }
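
/* Note: in read_attribute() above, only MAINPROCESS actually reads and
 * verifies the attribute values; the other ranks merely open and close the
 * attribute, so verification errors are reported from rank 0 only. */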
1469 
1470 /* This function compares the original data with the read-in data, but only
1471  * for the hyperslab owned by this process (as determined by its rank).
1472  *
1473  * Changes:    Modified function to use a passed in size parameter
1474  *        instead of the old SIZE #define.  This should let us
1475  *        run with an arbitrary number of processes.
1476  *
1477  *                    JRM - 8/16/04
1478  */
1479 static int
1480 check_value(DATATYPE *indata, DATATYPE *outdata, int size)
1481 {
1482     int      mpi_rank, mpi_size, err_num=0;
1483     hsize_t  i, j;
1484     hsize_t  chunk_origin[DIM];
1485     hsize_t  chunk_dims[DIM], count[DIM];
1486 
1487     MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
1488     MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
1489 
1490     get_slab(chunk_origin, chunk_dims, count, NULL, size);
1491 
1492     /* compare only this process's row block; the data is stored row-major,
1493      * so element (i, j) of the full size x size array is at offset i*size + j */
1494     for(i=chunk_origin[0]; i<(chunk_origin[0]+chunk_dims[0]); i++)
1495         for(j=chunk_origin[1]; j<(chunk_origin[1]+chunk_dims[1]); j++) {
1496             if(indata[i*(hsize_t)size + j] != outdata[i*(hsize_t)size + j])
1497                 if(err_num++ < MAX_ERR_REPORT || VERBOSE_MED)
1498                     HDprintf("Dataset Verify failed at [%lu][%lu] (row %lu, col %lu): expect %d, got %d\n", (unsigned long)i, (unsigned long)j, (unsigned long)i, (unsigned long)j, outdata[i*(hsize_t)size + j], indata[i*(hsize_t)size + j]);
1499         }
1500     if(err_num > MAX_ERR_REPORT && !VERBOSE_MED)
1501         HDprintf("[more errors ...]\n");
1502     if(err_num)
1503         HDprintf("%d errors found in check_value\n", err_num);
1504     return err_num;
1505 }
1506 
1507 /* Decide the portion of the data chunk in the dataset owned by this process ID.
1508  *
1509  * Changes:    Modified function to use a passed in size parameter
1510  *        instead of the old SIZE #define.  This should let us
1511  *        run with an arbitrary number of processes.
1512  *
1513  *                    JRM - 8/11/04
1514  */
1515 
1516 static void
1517 get_slab(hsize_t chunk_origin[], hsize_t chunk_dims[], hsize_t count[],
1518     hsize_t file_dims[], int size)
1519 {
1520     int mpi_rank, mpi_size;
1521 
1522     MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
1523     MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
1524 
1525     if(chunk_origin != NULL) {
1526         chunk_origin[0] = mpi_rank *(size/mpi_size);
1527         chunk_origin[1] = 0;
1528     }
1529     if(chunk_dims != NULL) {
1530         chunk_dims[0]   = size/mpi_size;
1531         chunk_dims[1]   = size;
1532     }
1533     if(file_dims != NULL)
1534         file_dims[0] = file_dims[1] = size;
1535     if(count != NULL)
1536         count[0] = count[1] = 1;
1537 }
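
/*
 * Illustrative sketch (not part of the test proper): one way the values
 * produced by get_slab() can be combined with H5Sselect_hyperslab() to
 * select a process's row block.  The helper name select_my_slab_sketch is
 * hypothetical; the actual tests in this file build their selections inline.
 * For example, with size = 8 and mpi_size = 4, rank 2 gets
 * chunk_origin = {4, 0} and chunk_dims = {2, 8}, i.e. rows 4-5 and all 8
 * columns.
 */
#if 0
static herr_t
select_my_slab_sketch(hid_t filespace, int size)
{
    hsize_t start[DIM], block[DIM], count[DIM];

    /* start = {rank*(size/mpi_size), 0}, block = {size/mpi_size, size} */
    get_slab(start, block, count, NULL, size);

    /* count = {1, 1}: a single block of (size/mpi_size) x size elements */
    return H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, NULL, count, block);
}
#endif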
1538 
1539 /*
1540  * This function is based on bug demonstration code provided by Thomas
1541  * Guignon (thomas.guignon@ifp.fr), and is intended to verify the
1542  * correctness of my fix for that bug.
1543  *
1544  * In essence, the bug appeared when at least one process attempted to
1545  * write a point selection -- for which collective I/O is not supported,
1546  * and at least one other attempted to write some other type of selection
1547  * for which collective I/O is supported.
1548  *
1549  * Since the processes did not compare notes before performing the I/O,
1550  * some would attempt collective I/O while others performed independent
1551  * I/O.  A hang resulted.
1552  *
1553  * This function reproduces this situation.  At present the test hangs
1554  * on failure.
1555  *                                         JRM - 9/13/04
1556  *
1557  * Changes:    None.
1558  */
1559 
1560 #define N 4
1561 
1562 void io_mode_confusion(void)
1563 {
1564     /*
1565      * HDF5 APIs definitions
1566      */
1567 
1568     const int   rank = 1;
1569     const char *dataset_name = "IntArray";
1570 
1571     hid_t       file_id, dset_id;         /* file and dataset identifiers */
1572     hid_t       filespace, memspace;      /* file and memory dataspace */
1573                                           /* identifiers               */
1574     hsize_t     dimsf[1];                 /* dataset dimensions */
1575     int         data[N] = {1};            /* data buffer to write */
1576     hsize_t     coord[N] = {0L,1L,2L,3L};
1577     hid_t       plist_id;                 /* property list identifier */
1578     herr_t      status;
1579 
1580 
1581     /*
1582      * MPI variables
1583      */
1584 
1585     int mpi_size, mpi_rank;
1586 
1587 
1588     /*
1589      * test bed related variables
1590      */
1591 
1592     const char *    fcn_name = "io_mode_confusion";
1593     const hbool_t    verbose = FALSE;
1594     const H5Ptest_param_t *    pt;
1595     char *        filename;
1596 
1597 
1598     pt = GetTestParameters();
1599     filename = pt->name;
1600 
1601     MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
1602     MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
1603 
1604     /*
1605      * Set up file access property list with parallel I/O access
1606      */
1607 
1608     if(verbose )
1609         HDfprintf(stdout, "%0d:%s: Setting up property list.\n",
1610                   mpi_rank, fcn_name);
1611 
1612     plist_id = H5Pcreate(H5P_FILE_ACCESS);
1613     VRFY((plist_id != -1), "H5Pcreate() failed");
1614 
1615     status = H5Pset_fapl_mpio(plist_id, MPI_COMM_WORLD, MPI_INFO_NULL);
1616     VRFY((status >= 0 ), "H5Pset_fapl_mpio() failed");
1617 
1618 
1619     /*
1620      * Create a new file collectively and release property list identifier.
1621      */
1622 
1623     if(verbose )
1624         HDfprintf(stdout, "%0d:%s: Creating new file.\n", mpi_rank, fcn_name);
1625 
1626     file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, plist_id);
1627     VRFY((file_id >= 0 ), "H5Fcreate() failed");
1628 
1629     status = H5Pclose(plist_id);
1630     VRFY((status >= 0 ), "H5Pclose() failed");
1631 
1632 
1633     /*
1634      * Create the dataspace for the dataset.
1635      */
1636 
1637     if(verbose )
1638         HDfprintf(stdout, "%0d:%s: Creating the dataspace for the dataset.\n",
1639                   mpi_rank, fcn_name);
1640 
1641     dimsf[0] = N;
1642     filespace = H5Screate_simple(rank, dimsf, NULL);
1643     VRFY((filespace >= 0 ), "H5Screate_simple() failed.");
1644 
1645 
1646     /*
1647      * Create the dataset with default properties and close filespace.
1648      */
1649 
1650     if(verbose )
1651         HDfprintf(stdout,
1652                   "%0d:%s: Creating the dataset, and closing filespace.\n",
1653                   mpi_rank, fcn_name);
1654 
1655     dset_id = H5Dcreate2(file_id, dataset_name, H5T_NATIVE_INT, filespace, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
1656     VRFY((dset_id >= 0 ), "H5Dcreate2() failed");
1657 
1658     status = H5Sclose(filespace);
1659     VRFY((status >= 0 ), "H5Sclose() failed");
1660 
1661 
1662     if(verbose )
1663         HDfprintf(stdout, "%0d:%s: Calling H5Screate_simple().\n",
1664                 mpi_rank, fcn_name);
1665 
1666     memspace = H5Screate_simple(rank, dimsf, NULL);
1667     VRFY((memspace >= 0 ), "H5Screate_simple() failed.");
1668 
1669 
1670     if(mpi_rank == 0 ) {
1671         if(verbose )
1672             HDfprintf(stdout, "%0d:%s: Calling H5Sselect_all(memspace).\n",
1673                       mpi_rank, fcn_name);
1674 
1675         status = H5Sselect_all(memspace);
1676         VRFY((status >= 0 ), "H5Sselect_all() failed");
1677     } else {
1678         if(verbose )
1679             HDfprintf(stdout, "%0d:%s: Calling H5Sselect_none(memspace).\n",
1680                       mpi_rank, fcn_name);
1681 
1682         status = H5Sselect_none(memspace);
1683         VRFY((status >= 0 ), "H5Sselect_none() failed");
1684     }
1685 
1686 
1687     if(verbose )
1688         HDfprintf(stdout, "%0d:%s: Calling MPI_Barrier().\n",
1689                   mpi_rank, fcn_name);
1690 
1691     MPI_Barrier(MPI_COMM_WORLD);
1692 
1693 
1694     if(verbose )
1695         HDfprintf(stdout, "%0d:%s: Calling H5Dget_space().\n",
1696                   mpi_rank, fcn_name);
1697 
1698     filespace = H5Dget_space(dset_id);
1699     VRFY((filespace >= 0 ), "H5Dget_space() failed");
1700 
1701 
1702     /* select all */
1703     if(mpi_rank == 0 ) {
1704         if(verbose )
1705              HDfprintf(stdout,
1706                        "%0d:%s: Calling H5Sselect_elements() -- set up hang?\n",
1707                        mpi_rank, fcn_name);
1708 
1709         status = H5Sselect_elements(filespace, H5S_SELECT_SET, N, (const hsize_t *)&coord);
1710         VRFY((status >= 0 ), "H5Sselect_elements() failed");
1711     } else { /* select nothing */
1712         if(verbose )
1713             HDfprintf(stdout, "%0d:%s: Calling H5Sselect_none().\n",
1714                       mpi_rank, fcn_name);
1715 
1716         status = H5Sselect_none(filespace);
1717         VRFY((status >= 0 ), "H5Sselect_none() failed");
1718     }
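
    /* Note: this is the point of I/O mode divergence described above -- rank 0
     * now has a point selection (for which collective I/O was not supported at
     * the time of the bug), while the other ranks have empty selections, so the
     * collective H5Dwrite() below could hang if the library let the ranks take
     * different I/O paths. */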
1719 
1720     if(verbose )
1721         HDfprintf(stdout, "%0d:%s: Calling MPI_Barrier().\n",
1722                   mpi_rank, fcn_name);
1723 
1724     MPI_Barrier(MPI_COMM_WORLD);
1725 
1726 
1727     if(verbose )
1728         HDfprintf(stdout, "%0d:%s: Calling H5Pcreate().\n", mpi_rank, fcn_name);
1729 
1730     plist_id = H5Pcreate(H5P_DATASET_XFER);
1731     VRFY((plist_id != -1 ), "H5Pcreate() failed");
1732 
1733 
1734     if(verbose )
1735         HDfprintf(stdout, "%0d:%s: Calling H5Pset_dxpl_mpio().\n",
1736                   mpi_rank, fcn_name);
1737 
1738     status = H5Pset_dxpl_mpio(plist_id, H5FD_MPIO_COLLECTIVE);
1739     VRFY((status >= 0 ), "H5Pset_dxpl_mpio() failed");
1740     if(dxfer_coll_type == DXFER_INDEPENDENT_IO) {
1741         status = H5Pset_dxpl_mpio_collective_opt(plist_id, H5FD_MPIO_INDIVIDUAL_IO);
1742         VRFY((status>= 0),"set independent IO collectively succeeded");
1743     }
1744 
1745 
1746 
1747 
1748     if(verbose )
1749         HDfprintf(stdout, "%0d:%s: Calling H5Dwrite() -- hang here?.\n",
1750                   mpi_rank, fcn_name);
1751 
1752     status = H5Dwrite(dset_id, H5T_NATIVE_INT, memspace, filespace,
1753                       plist_id, data);
1754 
1755     if(verbose )
1756         HDfprintf(stdout, "%0d:%s: Returned from H5Dwrite(), status=%d.\n",
1757                   mpi_rank, fcn_name, status);
1758     VRFY((status >= 0 ), "H5Dwrite() failed");
1759 
1760     /*
1761      * Close/release resources.
1762      */
1763 
1764     if(verbose )
1765         HDfprintf(stdout, "%0d:%s: Cleaning up from test.\n",
1766                   mpi_rank, fcn_name);
1767 
1768     status = H5Dclose(dset_id);
1769     VRFY((status >= 0 ), "H5Dclose() failed");
1770 
1771     status = H5Sclose(filespace);
1772     VRFY((status >= 0 ), "H5Sclose() failed");
1773 
1774     status = H5Sclose(memspace);
1775     VRFY((status >= 0 ), "H5Sclose() failed");
1776 
1777     status = H5Pclose(plist_id);
1778     VRFY((status >= 0 ), "H5Pclose() failed");
1779 
1780     status = H5Fclose(file_id);
1781     VRFY((status >= 0 ), "H5Fclose() failed");
1782 
1783 
1784     if(verbose )
1785         HDfprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name);
1786 
1787     return;
1788 
1789 } /* io_mode_confusion() */
1790 
1791 #undef N
1792 
1793 /*
1794  * At present, the object header code maintains an image of its on disk
1795  * representation, which it updates as necessary instead of generating it on
1796  * request.
1797  *
1798  * Prior to the fix that this test is designed to verify, the image of the
1799  * on disk representation was only updated on flush -- not when the object
1800  * header was marked clean.
1801  *
1802  * This worked perfectly well as long as all writes of a given object
1803  * header were performed by a single process.  However, with the implementation
1804  * of round robin metadata writes in parallel HDF5, this is no longer
1805  * the case -- it is possible for a given object header to be flushed from
1806  * several different processes, with the object header simply being marked
1807  * clean in all other processes on each flush.  This resulted in NULL or
1808  * out of date object header information being written to disk.
1809  *
1810  * To repair this, I modified the object header code to update its
1811  * on disk image both on flush and when marked clean.
1812  *
1813  * This test is directed at verifying that the fix performs as expected.
1814  *
1815  * The test functions by creating an HDF5 file with several small datasets,
1816  * and then flushing the file.  This should result in at least one of
1817  * the associated object headers being flushed by a process other than
1818  * process 0.
1819  *
1820  * Then for each data set, add an attribute and flush the file again.
1821  *
1822  * Close the file and re-open it.
1823  *
1824  * Open each of the data sets in turn.  If all opens are successful,
1825  * the test passes.  Otherwise the test fails.
1826  *
1827  * Note that this test will probably become irrelevant shortly, when we
1828  * land the journaling modifications on the trunk -- at which point all
1829  * cache clients will have to construct on disk images on demand.
1830  *
1831  *                        JRM -- 10/13/10
1832  *
1833  * Changes:
1834  *    Break it into two parts, a writer to write the file and a reader
1835  *    to verify the correctness of the writer. AKC -- 2010/10/27
1836  */
1837 
1838 #define NUM_DATA_SETS    4
1839 #define LOCAL_DATA_SIZE    4
1840 #define LARGE_ATTR_SIZE    256
1841 /* Since the even and odd processes are split into the writer and reader comms
1842  * respectively, processes 0 and 1 in COMM_WORLD become the root processes of
1843  * the writer and reader comms respectively.
1844  */
1845 #define Writer_Root    0
1846 #define Reader_Root    1
1847 #define Reader_wait(mpi_err, xsteps) \
1848     mpi_err = MPI_Bcast(&xsteps, 1, MPI_INT, Writer_Root, MPI_COMM_WORLD)
1849 #define Reader_result(mpi_err, xsteps_done) \
1850     mpi_err = MPI_Bcast(&xsteps_done, 1, MPI_INT, Reader_Root, MPI_COMM_WORLD)
1851 #define Reader_check(mpi_err, xsteps, xsteps_done) \
1852     { Reader_wait(mpi_err, xsteps); \
1853       Reader_result(mpi_err, xsteps_done);}
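
/* Handshake protocol: the writer root broadcasts the number of completed
 * steps on MPI_COMM_WORLD (received by the readers via Reader_wait); after
 * verifying, the reader root broadcasts back how many steps were actually
 * checked (Reader_result).  Reader_check, called by the writers, performs
 * both halves of the exchange.  A step count of 0 tells the readers to end.
 */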
1854 
1855 /* object names used by both rr_obj_hdr_flush_confusion_writer and
1856  * rr_obj_hdr_flush_confusion_reader.
1857  */
1858 const char * dataset_name[NUM_DATA_SETS] =
1859     {
1860     "dataset_0",
1861     "dataset_1",
1862     "dataset_2",
1863     "dataset_3"
1864     };
1865 const char * att_name[NUM_DATA_SETS] =
1866     {
1867     "attribute_0",
1868     "attribute_1",
1869     "attribute_2",
1870     "attribute_3"
1871     };
1872 const char * lg_att_name[NUM_DATA_SETS] =
1873     {
1874     "large_attribute_0",
1875     "large_attribute_1",
1876     "large_attribute_2",
1877     "large_attribute_3"
1878     };
1879 
1880 void rr_obj_hdr_flush_confusion(void)
1881 {
1882     /* MPI variables */
1883     /* private communicator size and rank */
1884     int mpi_size;
1885     int mpi_rank;
1886     int mrc;        /* mpi error code */
1887     int is_reader;    /* 1 for reader process; 0 for writer process. */
1888     MPI_Comm comm;
1889 
1890 
1891     /* test bed related variables */
1892     const char *        fcn_name = "rr_obj_hdr_flush_confusion";
1893     const hbool_t        verbose = FALSE;
1894 
1895     /* Create two new private communicators from MPI_COMM_WORLD.
1896      * Even and odd ranked processes go to comm_writers and comm_readers
1897      * respectively.
1898      */
1899     MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
1900     MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
1901 
1902     HDassert(mpi_size > 2);
1903 
1904     is_reader = mpi_rank % 2;
1905     mrc = MPI_Comm_split(MPI_COMM_WORLD, is_reader, mpi_rank, &comm);
1906     VRFY((mrc==MPI_SUCCESS), "MPI_Comm_split");
1907 
1908     /* The reader processes branch off to do reading
1909      * while the writer processes continue writing.
1910      * Whenever the writers finish one writing step, including an H5Fflush,
1911      * they inform the readers, via MPI_COMM_WORLD, to verify.
1912      * They will wait for the result from the readers before doing the next
1913      * step. When all steps are done, they inform readers to end.
1914      */
1915     if (is_reader)
1916         rr_obj_hdr_flush_confusion_reader(comm);
1917     else
1918         rr_obj_hdr_flush_confusion_writer(comm);
1919 
1920     MPI_Comm_free(&comm);
1921     if(verbose )
1922         HDfprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name);
1923 
1924     return;
1925 
1926 } /* rr_obj_hdr_flush_confusion() */
1927 
1928 void rr_obj_hdr_flush_confusion_writer(MPI_Comm comm)
1929 {
1930     int i;
1931     int j;
1932     hid_t file_id = -1;
1933     hid_t fapl_id = -1;
1934     hid_t dxpl_id = -1;
1935     hid_t att_id[NUM_DATA_SETS];
1936     hid_t att_space[NUM_DATA_SETS];
1937     hid_t lg_att_id[NUM_DATA_SETS];
1938     hid_t lg_att_space[NUM_DATA_SETS];
1939     hid_t disk_space[NUM_DATA_SETS];
1940     hid_t mem_space[NUM_DATA_SETS];
1941     hid_t dataset[NUM_DATA_SETS];
1942     hsize_t att_size[1];
1943     hsize_t lg_att_size[1];
1944     hsize_t disk_count[1];
1945     hsize_t disk_size[1];
1946     hsize_t disk_start[1];
1947     hsize_t mem_count[1];
1948     hsize_t mem_size[1];
1949     hsize_t mem_start[1];
1950     herr_t err;
1951     double data[LOCAL_DATA_SIZE];
1952     double att[LOCAL_DATA_SIZE];
1953     double lg_att[LARGE_ATTR_SIZE];
1954 
1955     /* MPI variables */
1956     /* world communication size and rank */
1957     int mpi_world_size;
1958     int mpi_world_rank;
1959     /* private communicator size and rank */
1960     int mpi_size;
1961     int mpi_rank;
1962     int mrc;        /* mpi error code */
1963     /* steps to verify and have been verified */
1964     int steps = 0;
1965     int steps_done = 0;
1966 
1967     /* test bed related variables */
1968     const char   *fcn_name = "rr_obj_hdr_flush_confusion_writer";
1969     const hbool_t verbose = FALSE;
1970     const H5Ptest_param_t *pt;
1971     char         *filename;
1972 
1973     /*
1974      * setup test bed related variables:
1975      */
1976 
1977     pt = (const H5Ptest_param_t *)GetTestParameters();
1978     filename = pt->name;
1979 
1980     MPI_Comm_rank(MPI_COMM_WORLD, &mpi_world_rank);
1981     MPI_Comm_size(MPI_COMM_WORLD, &mpi_world_size);
1982     MPI_Comm_rank(comm, &mpi_rank);
1983     MPI_Comm_size(comm, &mpi_size);
1984 
1985     /*
1986      * Set up file access property list with parallel I/O access
1987      */
1988 
1989     if(verbose )
1990         HDfprintf(stdout, "%0d:%s: Setting up property list.\n",
1991                   mpi_rank, fcn_name);
1992 
1993     fapl_id = H5Pcreate(H5P_FILE_ACCESS);
1994     VRFY((fapl_id != -1), "H5Pcreate(H5P_FILE_ACCESS) failed");
1995 
1996     err = H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL);
1997     VRFY((err >= 0 ), "H5Pset_fapl_mpio() failed");
1998 
1999 
2000     /*
2001      * Create a new file collectively and release property list identifier.
2002      */
2003 
2004     if(verbose )
2005         HDfprintf(stdout, "%0d:%s: Creating new file \"%s\".\n",
2006                   mpi_rank, fcn_name, filename);
2007 
2008     file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
2009     VRFY((file_id >= 0 ), "H5Fcreate() failed");
2010 
2011     err = H5Pclose(fapl_id);
2012     VRFY((err >= 0 ), "H5Pclose(fapl_id) failed");
2013 
2014 
2015     /*
2016      * Step 1: create the data sets and write data.
2017      */
2018 
2019     if(verbose )
2020         HDfprintf(stdout, "%0d:%s: Creating the datasets.\n",
2021                   mpi_rank, fcn_name);
2022 
2023     disk_size[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_size);
2024     mem_size[0] = (hsize_t)(LOCAL_DATA_SIZE);
2025 
2026     for ( i = 0; i < NUM_DATA_SETS; i++ ) {
2027 
2028         disk_space[i] = H5Screate_simple(1, disk_size, NULL);
2029         VRFY((disk_space[i] >= 0), "H5Screate_simple(1) failed.\n");
2030 
2031         dataset[i] = H5Dcreate2(file_id, dataset_name[i], H5T_NATIVE_DOUBLE,
2032                                 disk_space[i], H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
2033 
2034         VRFY((dataset[i] >= 0), "H5Dcreate(1) failed.\n");
2035     }
2036 
2037     /*
2038      * setup data transfer property list
2039      */
2040 
2041     if(verbose )
2042         HDfprintf(stdout, "%0d:%s: Setting up dxpl.\n", mpi_rank, fcn_name);
2043 
2044     dxpl_id = H5Pcreate(H5P_DATASET_XFER);
2045     VRFY((dxpl_id != -1), "H5Pcreate(H5P_DATASET_XFER) failed.\n");
2046 
2047     err = H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
2048     VRFY((err >= 0),
2049          "H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) failed.\n");
2050 
2051     /*
2052      * write data to the data sets
2053      */
2054 
2055     if(verbose )
2056         HDfprintf(stdout, "%0d:%s: Writing datasets.\n", mpi_rank, fcn_name);
2057 
2058     disk_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
2059     disk_start[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_rank);
2060     mem_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
2061     mem_start[0] = (hsize_t)(0);
2062 
2063     for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) {
2064         data[j] = (double)(mpi_rank + 1);
2065     }
2066 
2067     for ( i = 0; i < NUM_DATA_SETS; i++ ) {
2068         err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start,
2069                 NULL, disk_count, NULL);
2070         VRFY((err >= 0), "H5Sselect_hyperslab(1) failed.\n");
2071         mem_space[i] = H5Screate_simple(1, mem_size, NULL);
2072         VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n");
2073         err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET,
2074                 mem_start, NULL, mem_count, NULL);
2075         VRFY((err >= 0), "H5Sselect_hyperslab(2) failed.\n");
2076         err = H5Dwrite(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i],
2077                 disk_space[i], dxpl_id, data);
2078         VRFY((err >= 0), "H5Dwrite(1) failed.\n");
2079         for ( j = 0; j < LOCAL_DATA_SIZE; j++ )
2080             data[j] *= 10.0;
2081     }
2082 
2083     /*
2084      * close the data spaces
2085      */
2086 
2087     if(verbose )
2088         HDfprintf(stdout, "%0d:%s: closing dataspaces.\n", mpi_rank, fcn_name);
2089 
2090     for ( i = 0; i < NUM_DATA_SETS; i++ ) {
2091         err = H5Sclose(disk_space[i]);
2092         VRFY((err >= 0), "H5Sclose(disk_space[i]) failed.\n");
2093         err = H5Sclose(mem_space[i]);
2094         VRFY((err >= 0), "H5Sclose(mem_space[i]) failed.\n");
2095     }
2096 
2097     /* End of Step 1: create the data sets and write data. */
2098 
2099     /*
2100      * flush the metadata cache
2101      */
2102 
2103     if(verbose )
2104         HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n",
2105                   mpi_rank, fcn_name);
2106     err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
2107     VRFY((err >= 0), "H5Fflush(1) failed.\n");
2108 
2109     /* Tell the reader to check the file up to steps. */
2110     steps++;
2111     Reader_check(mrc, steps, steps_done);
2112 
2113     /*
2114      * Step 2: write attributes to each dataset
2115      */
2116 
2117     if(verbose )
2118         HDfprintf(stdout, "%0d:%s: writing attributes.\n", mpi_rank, fcn_name);
2119 
2120     att_size[0] = (hsize_t)(LOCAL_DATA_SIZE);
2121     for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) {
2122         att[j] = (double)(j + 1);
2123     }
2124 
2125     for ( i = 0; i < NUM_DATA_SETS; i++ ) {
2126         att_space[i] = H5Screate_simple(1, att_size, NULL);
2127         VRFY((att_space[i] >= 0), "H5Screate_simple(3) failed.\n");
2128         att_id[i] = H5Acreate2(dataset[i], att_name[i], H5T_NATIVE_DOUBLE,
2129                                att_space[i], H5P_DEFAULT, H5P_DEFAULT);
2130         VRFY((att_id[i] >= 0), "H5Acreate(1) failed.\n");
2131         err = H5Awrite(att_id[i], H5T_NATIVE_DOUBLE, att);
2132         VRFY((err >= 0), "H5Awrite(1) failed.\n");
2133         for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) {
2134             att[j] /= 10.0;
2135         }
2136     }
2137 
2138     /*
2139      * close attribute IDs and spaces
2140      */
2141 
2142     if(verbose )
2143         HDfprintf(stdout, "%0d:%s: closing attr ids and spaces .\n",
2144                   mpi_rank, fcn_name);
2145 
2146     for ( i = 0; i < NUM_DATA_SETS; i++ ) {
2147         err = H5Sclose(att_space[i]);
2148         VRFY((err >= 0), "H5Sclose(att_space[i]) failed.\n");
2149         err = H5Aclose(att_id[i]);
2150         VRFY((err >= 0), "H5Aclose(att_id[i]) failed.\n");
2151     }
2152 
2153     /* End of Step 2: write attributes to each dataset */
2154 
2155     /*
2156      * flush the metadata cache again
2157      */
2158 
2159     if(verbose )
2160         HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n",
2161                   mpi_rank, fcn_name);
2162     err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
2163     VRFY((err >= 0), "H5Fflush(2) failed.\n");
2164 
2165     /* Tell the reader to check the file up to steps. */
2166     steps++;
2167     Reader_check(mrc, steps, steps_done);
2168 
2169     /*
2170      * Step 3: write large attributes to each dataset
2171      */
2172 
2173     if(verbose )
2174         HDfprintf(stdout, "%0d:%s: writing large attributes.\n",
2175                   mpi_rank, fcn_name);
2176 
2177     lg_att_size[0] = (hsize_t)(LARGE_ATTR_SIZE);
2178 
2179     for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) {
2180         lg_att[j] = (double)(j + 1);
2181     }
2182 
2183     for ( i = 0; i < NUM_DATA_SETS; i++ ) {
2184         lg_att_space[i] = H5Screate_simple(1, lg_att_size, NULL);
2185         VRFY((lg_att_space[i] >= 0), "H5Screate_simple(4) failed.\n");
2186         lg_att_id[i] = H5Acreate2(dataset[i], lg_att_name[i], H5T_NATIVE_DOUBLE,
2187                                   lg_att_space[i], H5P_DEFAULT, H5P_DEFAULT);
2188         VRFY((lg_att_id[i] >= 0), "H5Acreate(2) failed.\n");
2189         err = H5Awrite(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att);
2190         VRFY((err >= 0), "H5Awrite(2) failed.\n");
2191         for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) {
2192             lg_att[j] /= 10.0;
2193         }
2194     }
2195 
2196     /* End of Step 3: write large attributes to each dataset */
2197 
2198     /*
2199      * flush the metadata cache yet again to clean the object headers.
2200      *
2201      * This is an attempt to create a situation where we have dirty
2202      * object header continuation chunks, but clean object headers,
2203      * in order to verify a speculative bug fix -- it doesn't seem to work,
2204      * but I will leave the code in anyway, as the object header
2205      * code is going to change a lot in the near future.
2206      */
2207 
2208     if(verbose )
2209         HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n",
2210                   mpi_rank, fcn_name);
2211     err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
2212     VRFY((err >= 0), "H5Fflush(3) failed.\n");
2213 
2214     /* Tell the reader to check the file up to steps. */
2215     steps++;
2216     Reader_check(mrc, steps, steps_done);
2217 
2218     /*
2219      * Step 4: write different large attributes to each dataset
2220      */
2221 
2222     if(verbose )
2223         HDfprintf(stdout, "%0d:%s: writing different large attributes.\n",
2224                   mpi_rank, fcn_name);
2225 
2226     for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) {
2227         lg_att[j] = (double)(j + 2);
2228     }
2229 
2230     for ( i = 0; i < NUM_DATA_SETS; i++ ) {
2231         err = H5Awrite(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att);
2232         VRFY((err >= 0), "H5Awrite(2) failed.\n");
2233         for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) {
2234             lg_att[j] /= 10.0;
2235         }
2236     }
2237 
2238     /* End of Step 4: write different large attributes to each dataset */
2239 
2240     /*
2241      * flush the metadata cache again
2242      */
2243     if(verbose )
2244         HDfprintf(stdout, "%0d:%s: flushing metadata cache.\n",
2245                   mpi_rank, fcn_name);
2246     err = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
2247     VRFY((err >= 0), "H5Fflush(4) failed.\n");
2248 
2249     /* Tell the reader to check the file up to steps. */
2250     steps++;
2251     Reader_check(mrc, steps, steps_done);
2252 
2253     /* Step 5: Close all objects and the file */
2254 
2255     /*
2256      * close large attribute IDs and spaces
2257      */
2258 
2259     if(verbose )
2260         HDfprintf(stdout, "%0d:%s: closing large attr ids and spaces .\n",
2261                   mpi_rank, fcn_name);
2262 
2263     for ( i = 0; i < NUM_DATA_SETS; i++ ) {
2264 
2265         err = H5Sclose(lg_att_space[i]);
2266         VRFY((err >= 0), "H5Sclose(lg_att_space[i]) failed.\n");
2267         err = H5Aclose(lg_att_id[i]);
2268         VRFY((err >= 0), "H5Aclose(lg_att_id[i]) failed.\n");
2269     }
2270 
2271 
2272     /*
2273      * close the data sets
2274      */
2275 
2276     if(verbose )
2277         HDfprintf(stdout, "%0d:%s: closing datasets .\n", mpi_rank, fcn_name);
2278 
2279     for ( i = 0; i < NUM_DATA_SETS; i++ ) {
2280         err = H5Dclose(dataset[i]);
2281         VRFY((err >= 0), "H5Dclose(dataset[i])1 failed.\n");
2282     }
2283 
2284     /*
2285      * close the data transfer property list.
2286      */
2287 
2288     if(verbose )
2289         HDfprintf(stdout, "%0d:%s: closing dxpl .\n", mpi_rank, fcn_name);
2290 
2291     err = H5Pclose(dxpl_id);
2292     VRFY((err >= 0), "H5Pclose(dxpl_id) failed.\n");
2293 
2294 
2295     /*
2296      * Close file.
2297      */
2298 
2299     if(verbose )
2300         HDfprintf(stdout, "%0d:%s: closing file.\n", mpi_rank, fcn_name);
2301 
2302     err = H5Fclose(file_id);
2303     VRFY((err >= 0 ), "H5Fclose(1) failed");
2304 
2305     /* End of Step 5: Close all objects and the file */
2306     /* Tell the reader to check the file up to steps. */
2307     steps++;
2308     Reader_check(mrc, steps, steps_done);
2309 
2310 
2311     /* All done. Inform reader to end. */
2312     steps=0;
2313     Reader_check(mrc, steps, steps_done);
2314 
2315     if(verbose )
2316         HDfprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name);
2317 
2318     return;
2319 
2320 } /* rr_obj_hdr_flush_confusion_writer() */
2321 
2322 void rr_obj_hdr_flush_confusion_reader(MPI_Comm comm)
2323 {
2324     int i;
2325     int j;
2326     hid_t file_id = -1;
2327     hid_t fapl_id = -1;
2328     hid_t dxpl_id = -1;
2329     hid_t lg_att_id[NUM_DATA_SETS];
2330     hid_t lg_att_type[NUM_DATA_SETS];
2331     hid_t disk_space[NUM_DATA_SETS];
2332     hid_t mem_space[NUM_DATA_SETS];
2333     hid_t dataset[NUM_DATA_SETS];
2334     hsize_t disk_count[1];
2335     hsize_t disk_start[1];
2336     hsize_t mem_count[1];
2337     hsize_t mem_size[1];
2338     hsize_t mem_start[1];
2339     herr_t err;
2340     htri_t tri_err;
2341     double data[LOCAL_DATA_SIZE];
2342     double data_read[LOCAL_DATA_SIZE];
2343     double att[LOCAL_DATA_SIZE];
2344     double att_read[LOCAL_DATA_SIZE];
2345     double lg_att[LARGE_ATTR_SIZE];
2346     double lg_att_read[LARGE_ATTR_SIZE];
2347 
2348     /* MPI variables */
2349     /* world communication size and rank */
2350     int    mpi_world_size;
2351     int    mpi_world_rank;
2352     /* private communicator size and rank */
2353     int    mpi_size;
2354     int    mpi_rank;
2355     int    mrc;        /* mpi error code */
2356     int    steps = -1;        /* How far (steps) to verify the file */
2357     int    steps_done = -1;    /* How far (steps) have been verified */
2358 
2359     /* test bed related variables */
2360     const char   *fcn_name = "rr_obj_hdr_flush_confusion_reader";
2361     const hbool_t verbose = FALSE;
2362     const H5Ptest_param_t *pt;
2363     char         *filename;
2364 
2365     /*
2366      * setup test bed related variables:
2367      */
2368 
2369     pt = (const H5Ptest_param_t *)GetTestParameters();
2370     filename = pt->name;
2371 
2372     MPI_Comm_rank(MPI_COMM_WORLD, &mpi_world_rank);
2373     MPI_Comm_size(MPI_COMM_WORLD, &mpi_world_size);
2374     MPI_Comm_rank(comm, &mpi_rank);
2375     MPI_Comm_size(comm, &mpi_size);
2376 
2377     /* Repeatedly re-open the file and verify its contents until it is */
2378     /* told to end (when steps=0). */
2379     while (steps_done != 0){
2380         Reader_wait(mrc, steps);
2381         VRFY((mrc >= 0), "Reader_wait failed");
2382         steps_done = 0;
2383 
2384         if (steps > 0 ){
2385             /*
2386             * Set up file access property list with parallel I/O access
2387             */
2388 
2389             if(verbose )
2390                 HDfprintf(stdout, "%0d:%s: Setting up property list.\n",
2391                         mpi_rank, fcn_name);
2392 
2393             fapl_id = H5Pcreate(H5P_FILE_ACCESS);
2394             VRFY((fapl_id != -1), "H5Pcreate(H5P_FILE_ACCESS) failed");
2395             err = H5Pset_fapl_mpio(fapl_id, comm, MPI_INFO_NULL);
2396             VRFY((err >= 0 ), "H5Pset_fapl_mpio() failed");
2397 
2398             /*
2399             * Re-open the file collectively and release the property list identifier.
2400             */
2401 
2402             if(verbose )
2403                 HDfprintf(stdout, "%0d:%s: Re-open file \"%s\".\n",
2404                         mpi_rank, fcn_name, filename);
2405 
2406             file_id = H5Fopen(filename, H5F_ACC_RDONLY, fapl_id);
2407             VRFY((file_id >= 0 ), "H5Fopen() failed");
2408             err = H5Pclose(fapl_id);
2409             VRFY((err >= 0 ), "H5Pclose(fapl_id) failed");
2410 
2411 #if 1
2412             if (steps >= 1){
2413                 /*=====================================================*
2414                 * Step 1: open the data sets and read data.
2415                 *=====================================================*/
2416 
2417                 if(verbose )
2418                     HDfprintf(stdout, "%0d:%s: opening the datasets.\n",
2419                             mpi_rank, fcn_name);
2420 
2421                 for ( i = 0; i < NUM_DATA_SETS; i++ ) {
2422                     dataset[i] = -1;
2423                 }
2424 
2425                 for ( i = 0; i < NUM_DATA_SETS; i++ ) {
2426                     dataset[i] = H5Dopen2(file_id, dataset_name[i], H5P_DEFAULT);
2427                     VRFY((dataset[i] >= 0), "H5Dopen(1) failed.\n");
2428                     disk_space[i] = H5Dget_space(dataset[i]);
2429                     VRFY((disk_space[i] >= 0), "H5Dget_space failed.\n");
2430                 }
2431 
2432                 /*
2433                 * setup data transfer property list
2434                 */
2435 
2436                 if(verbose )
2437                     HDfprintf(stdout, "%0d:%s: Setting up dxpl.\n", mpi_rank, fcn_name);
2438 
2439                 dxpl_id = H5Pcreate(H5P_DATASET_XFER);
2440                 VRFY((dxpl_id != -1), "H5Pcreate(H5P_DATASET_XFER) failed.\n");
2441                 err = H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE);
2442                 VRFY((err >= 0),
2443                         "H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) failed.\n");
2444 
2445                 /*
2446                 * read data from the data sets
2447                 */
2448 
2449                 if(verbose )
2450                     HDfprintf(stdout, "%0d:%s: Reading datasets.\n", mpi_rank, fcn_name);
2451 
2452                 disk_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
2453                 disk_start[0] = (hsize_t)(LOCAL_DATA_SIZE * mpi_rank);
2454 
2455                 mem_size[0] = (hsize_t)(LOCAL_DATA_SIZE);
2456 
2457                 mem_count[0] = (hsize_t)(LOCAL_DATA_SIZE);
2458                 mem_start[0] = (hsize_t)(0);
2459 
2460                 /* set up expected data for verification */
2461                 for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) {
2462                     data[j] = (double)(mpi_rank + 1);
2463                 }
2464 
2465                 for ( i = 0; i < NUM_DATA_SETS; i++ ) {
2466                     err = H5Sselect_hyperslab(disk_space[i], H5S_SELECT_SET, disk_start,
2467                             NULL, disk_count, NULL);
2468                     VRFY((err >= 0), "H5Sselect_hyperslab(1) failed.\n");
2469                     mem_space[i] = H5Screate_simple(1, mem_size, NULL);
2470                     VRFY((mem_space[i] >= 0), "H5Screate_simple(2) failed.\n");
2471                     err = H5Sselect_hyperslab(mem_space[i], H5S_SELECT_SET,
2472                             mem_start, NULL, mem_count, NULL);
2473                     VRFY((err >= 0), "H5Sselect_hyperslab(2) failed.\n");
2474                     err = H5Dread(dataset[i], H5T_NATIVE_DOUBLE, mem_space[i],
2475                             disk_space[i], dxpl_id, data_read);
2476                     VRFY((err >= 0), "H5Dread(1) failed.\n");
2477 
2478                     /* compare read data with expected data */
2479                     for ( j = 0; j < LOCAL_DATA_SIZE; j++ )
2480                         if (data_read[j] != data[j]){
2481                             HDfprintf(stdout,
2482                                     "%0d:%s: Reading datasets value failed in "
2483                                     "Dataset %d, at position %d: expect %f, got %f.\n",
2484                                     mpi_rank, fcn_name, i, j, data[j], data_read[j]);
2485                             nerrors++;
2486                         }
2487                     for ( j = 0; j < LOCAL_DATA_SIZE; j++ )
2488                         data[j] *= 10.0;
2489                 }
2490 
2491                 /*
2492                 * close the data spaces
2493                 */
2494 
2495                 if(verbose )
2496                     HDfprintf(stdout, "%0d:%s: closing dataspaces.\n", mpi_rank, fcn_name);
2497 
2498                 for ( i = 0; i < NUM_DATA_SETS; i++ ) {
2499                     err = H5Sclose(disk_space[i]);
2500                     VRFY((err >= 0), "H5Sclose(disk_space[i]) failed.\n");
2501                     err = H5Sclose(mem_space[i]);
2502                     VRFY((err >= 0), "H5Sclose(mem_space[i]) failed.\n");
2503                 }
2504                 steps_done++;
2505             }
2506             /* End of Step 1: open the data sets and read data. */
2507 #endif
2508 
2509 #if 1
2510             /*=====================================================*
2511             * Step 2: reading attributes from each dataset
2512             *=====================================================*/
2513 
2514             if (steps >= 2){
2515                 if(verbose )
2516                     HDfprintf(stdout, "%0d:%s: reading attributes.\n", mpi_rank, fcn_name);
2517 
2518                 for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) {
2519                     att[j] = (double)(j + 1);
2520                 }
2521 
2522                 for ( i = 0; i < NUM_DATA_SETS; i++ ) {
2523                     hid_t att_id, att_type;
2524 
2525                     att_id = H5Aopen(dataset[i], att_name[i], H5P_DEFAULT);
2526                     VRFY((att_id >= 0), "H5Aopen failed.\n");
2527                     att_type = H5Aget_type(att_id);
2528                     VRFY((att_type >= 0), "H5Aget_type failed.\n");
2529                     tri_err = H5Tequal(att_type, H5T_NATIVE_DOUBLE);
2530                     VRFY((tri_err >= 0), "H5Tequal failed.\n");
2531                     if (tri_err==0){
2532                         HDfprintf(stdout,
2533                                 "%0d:%s: Mismatched Attribute type of Dataset %d.\n",
2534                                 mpi_rank, fcn_name, i);
2535                         nerrors++;
2536                     }
2537                     else {
2538                         /* should verify attribute size before H5Aread */
2539                         err = H5Aread(att_id, H5T_NATIVE_DOUBLE, att_read);
2540                         VRFY((err >= 0), "H5Aread failed.\n");
2541                         /* compare read attribute data with expected data */
2542                         for ( j = 0; j < LOCAL_DATA_SIZE; j++ )
2543                             if (att_read[j] != att[j]){
2544                                 HDfprintf(stdout,
2545                                         "%0d:%s: Mismatched attribute data read in Dataset %d, at position %d: expect %f, got %f.\n",
2546                                         mpi_rank, fcn_name, i, j, att[j], att_read[j]);
2547                                 nerrors++;
2548                             }
2549                         for ( j = 0; j < LOCAL_DATA_SIZE; j++ ) {
2550                             att[j] /= 10.0;
2551                         }
2552                     }
2553                     err = H5Aclose(att_id);
2554                     VRFY((err >= 0), "H5Aclose failed.\n");
2555                 }
2556                 steps_done++;
2557             }
2558             /* End of Step 2: reading attributes from each dataset */
2559 #endif
2560 
2561 
2562 #if 1
2563             /*=====================================================*
2564             * Step 3 or 4: read large attributes from each dataset.
2565             * Step 4 has different attribute value from step 3.
2566             *=====================================================*/
2567 
2568             if (steps >= 3){
2569                 if(verbose )
2570                     HDfprintf(stdout, "%0d:%s: reading large attributes.\n", mpi_rank, fcn_name);
2571 
2572                 for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) {
2573                     lg_att[j] = (steps==3) ? (double)(j + 1) : (double)(j+2);
2574                 }
2575 
2576                 for ( i = 0; i < NUM_DATA_SETS; i++ ) {
2577                     lg_att_id[i] = H5Aopen(dataset[i], lg_att_name[i], H5P_DEFAULT);
2578                     VRFY((lg_att_id[i] >= 0), "H5Aopen(2) failed.\n");
2579                     lg_att_type[i] = H5Aget_type(lg_att_id[i]);
2580                     VRFY((lg_att_type[i] >= 0), "H5Aget_type failed.\n");
2581                     tri_err = H5Tequal(lg_att_type[i], H5T_NATIVE_DOUBLE);
2582                     VRFY((tri_err >= 0), "H5Tequal failed.\n");
2583                     if (tri_err==0){
2584                         HDfprintf(stdout,
2585                                 "%0d:%s: Mismatched Large attribute type of Dataset %d.\n",
2586                                 mpi_rank, fcn_name, i);
2587                         nerrors++;
2588                     }
2589                     else{
2590                         /* should verify large attribute size before H5Aread */
2591                         err = H5Aread(lg_att_id[i], H5T_NATIVE_DOUBLE, lg_att_read);
2592                         VRFY((err >= 0), "H5Aread failed.\n");
2593                         /* compare read attribute data with expected data */
2594                         for ( j = 0; j < LARGE_ATTR_SIZE; j++ )
2595                             if (lg_att_read[j] != lg_att[j]){
2596                                 HDfprintf(stdout,
2597                                         "%0d:%s: Mismatched large attribute data read in Dataset %d, at position %d: expect %f, got %f.\n",
2598                                         mpi_rank, fcn_name, i, j, lg_att[j], lg_att_read[j]);
2599                                 nerrors++;
2600                             }
2601                         for ( j = 0; j < LARGE_ATTR_SIZE; j++ ) {
2602 
2603                             lg_att[j] /= 10.0;
2604                         }
2605                     }
2606                     err = H5Tclose(lg_att_type[i]);
2607                     VRFY((err >= 0), "H5Tclose failed.\n");
2608                     err = H5Aclose(lg_att_id[i]);
2609                     VRFY((err >= 0), "H5Aclose failed.\n");
2610                 }
2611                 /* Both step 3 and 4 use this same read checking code. */
2612                 steps_done = (steps==3) ? 3 : 4;
2613             }
2614 
2615             /* End of Step 3 or 4: read large attributes from each dataset */
2616 #endif
2617 
2618 
2619             /*=====================================================*
2620             * Step 5: read all objects from the file
2621             *=====================================================*/
2622             if (steps>=5){
2623                 /* nothing extra to verify. The file is closed normally. */
2624                 /* Just increment steps_done */
2625                 steps_done++;
2626             }
2627 
2628             /*
2629             * Close the data sets
2630             */
2631 
2632             if(verbose )
2633                 HDfprintf(stdout, "%0d:%s: closing datasets again.\n",
2634                         mpi_rank, fcn_name);
2635 
2636             for ( i = 0; i < NUM_DATA_SETS; i++ ) {
2637                 if ( dataset[i] >= 0 ) {
2638                     err = H5Dclose(dataset[i]);
2639                     VRFY((err >= 0), "H5Dclose(dataset[i])1 failed.\n");
2640                 }
2641             }
2642 
2643             /*
2644             * close the data transfer property list.
2645             */
2646 
2647             if(verbose )
2648                 HDfprintf(stdout, "%0d:%s: closing dxpl .\n", mpi_rank, fcn_name);
2649 
2650             err = H5Pclose(dxpl_id);
2651             VRFY((err >= 0), "H5Pclose(dxpl_id) failed.\n");
2652 
2653             /*
2654             * Close the file
2655             */
2656             if(verbose)
2657                 HDfprintf(stdout, "%0d:%s: closing file again.\n",
2658                         mpi_rank, fcn_name);
2659             err = H5Fclose(file_id);
2660             VRFY((err >= 0 ), "H5Fclose(1) failed");
2661 
2662         } /* end if (steps > 0) */
2663         Reader_result(mrc, steps_done);
2664     } /* end while (steps_done != 0) */
2665 
2666     if(verbose )
2667         HDfprintf(stdout, "%0d:%s: Done.\n", mpi_rank, fcn_name);
2668 
2669     return;
2670 } /* rr_obj_hdr_flush_confusion_reader() */
2671 
2672 #undef NUM_DATA_SETS
2673 #undef LOCAL_DATA_SIZE
2674 #undef LARGE_ATTR_SIZE
2675 #undef Reader_check
2676 #undef Reader_wait
2677 #undef Reader_result
2678 #undef Writer_Root
2679 #undef Reader_Root
2680 
2681 
2682 /*
2683  * Test creating a chunked dataset in parallel in a file with an alignment set
2684  * and an alignment threshold large enough to avoid aligning the chunks but
2685  * small enough that the raw data aggregator will be aligned if it is treated as
2686  * an object that must be aligned by the library
2687  */
2688 #define CHUNK_SIZE 72
2689 #define NCHUNKS 32
2690 #define AGGR_SIZE 2048
2691 #define EXTRA_ALIGN 100
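
/* With these values the alignment threshold used below (CHUNK_SIZE + 1 = 73
 * bytes) is just above the 72-byte chunk size, so the chunks themselves are
 * never aligned, while the 2048-byte small-data aggregator block would be
 * aligned if the library mistakenly treated it as an object subject to
 * alignment. */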
2692 
2693 void chunk_align_bug_1(void)
2694 {
2695     int                 mpi_rank;
2696     hid_t               file_id, dset_id, fapl_id, dcpl_id, space_id;
2697     hsize_t             dims = CHUNK_SIZE * NCHUNKS, cdims = CHUNK_SIZE;
2698     h5_stat_size_t      file_size;
2699     hsize_t             align;
2700     herr_t              ret;
2701     const char          *filename;
2702 
2703     MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
2704 
2705     filename = (const char *)GetTestParameters();
2706 
2707     /* Create file without alignment */
2708     fapl_id = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
2709     VRFY((fapl_id >= 0), "create_faccess_plist succeeded");
2710     file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
2711     VRFY((file_id >= 0), "H5Fcreate succeeded");
2712 
2713     /* Close file */
2714     ret = H5Fclose(file_id);
2715     VRFY((ret >= 0), "H5Fclose succeeded");
2716 
2717     /* Get file size */
2718     file_size = h5_get_file_size(filename, fapl_id);
2719     VRFY((file_size >= 0), "h5_get_file_size succeeded");
2720 
2721     /* Calculate alignment value, set to allow a chunk to squeak in between the
2722      * original EOF and the aligned location of the aggregator.  Add some space
2723      * for the dataset metadata */
2724     align = (hsize_t)file_size + CHUNK_SIZE + EXTRA_ALIGN;
2725 
2726     /* Set aggregator size and alignment, disable metadata aggregator */
2727     HDassert(AGGR_SIZE > CHUNK_SIZE);
2728     ret = H5Pset_small_data_block_size(fapl_id, AGGR_SIZE);
2729     VRFY((ret >= 0), "H5Pset_small_data_block_size succeeded");
2730     ret = H5Pset_meta_block_size(fapl_id, 0);
2731     VRFY((ret >= 0), "H5Pset_meta_block_size succeeded");
2732     ret = H5Pset_alignment(fapl_id, CHUNK_SIZE + 1, align);
2733     VRFY((ret >= 0), "H5Pset_alignment succeeded");
2734 
2735     /* Reopen file with new settings */
2736     file_id = H5Fopen(filename, H5F_ACC_RDWR, fapl_id);
2737     VRFY((file_id >= 0), "H5Fopen succeeded");
2738 
2739     /* Create dataset */
2740     space_id = H5Screate_simple(1, &dims, NULL);
2741     VRFY((space_id >= 0), "H5Screate_simple succeeded");
2742     dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
2743     VRFY((dcpl_id >= 0), "H5Pcreate succeeded");
2744     ret = H5Pset_chunk(dcpl_id, 1, &cdims);
2745     VRFY((ret >= 0), "H5Pset_chunk succeeded");
2746     dset_id = H5Dcreate2(file_id, "dset", H5T_NATIVE_CHAR, space_id, H5P_DEFAULT, dcpl_id, H5P_DEFAULT);
2747     VRFY((dset_id >= 0), "H5Dcreate2 succeeded");
2748 
2749     /* Close ids */
2750     ret = H5Dclose(dset_id);
2751     VRFY((ret >= 0), "H5Dclose succeeded");
2752     ret = H5Sclose(space_id);
2753     VRFY((ret >= 0), "H5Sclose succeeded");
2754     ret = H5Pclose(dcpl_id);
2755     VRFY((ret >= 0), "H5Pclose succeeded");
2756     ret = H5Pclose(fapl_id);
2757     VRFY((ret >= 0), "H5Pclose succeeded");
2758 
2759     /* Close file */
2760     ret = H5Fclose(file_id);
2761     VRFY((ret >= 0), "H5Fclose succeeded");
2762 
2763     return;
2764 } /* end chunk_align_bug_1() */
2765 
2766 
2767 /*=============================================================================
2768  *                         End of t_mdset.c
2769  *===========================================================================*/
2770 
2771