/*
 * Distributed under the OSI-approved Apache License, Version 2.0. See
 * accompanying file Copyright.txt for details.
 *
 * helloHDF5Writer.cpp: Simple self-descriptive example of how to write
 * variables to a parallel HDF5 file using MPI processes.
 *
 * Created on: March 20, 2017
 *      Author: Junmin
 */

#include <ios>      //std::ios_base::failure
#include <iostream> //std::cout
#include <mpi.h>
#include <stdexcept> //std::invalid_argument std::exception
#include <vector>

#include <adios2.h>

int main(int argc, char *argv[])
{
    MPI_Init(&argc, &argv);
    int rank, size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /** Application variables */
    std::vector<float> myFloats = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
    std::vector<int> myInts = {0, -1, -2, -3, -4, -5, -6, -7, -8, -9};
    double myScalar = 1.234;
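    // Nx: number of elements each rank contributes to the global arrays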
    const std::size_t Nx = myFloats.size();

    try
    {
        /** ADIOS class factory of IO class objects */
        adios2::ADIOS adios(MPI_COMM_WORLD);

        /*** IO class object: settings and factory of Settings: Variables,
         * Parameters, Transports, and Execution: Engines */
        adios2::IO hdf5IO = adios.DeclareIO("HDFFileIO");
        hdf5IO.SetEngine("HDF5");
        hdf5IO.SetParameter("IdleH5Writer",
                            "true"); // set this when not all ranks are writing

        /** global array : name, { shape (total) }, { start (local) }, { count
         * (local) }, all are constant dimensions */
        adios2::Variable<float> h5Floats = hdf5IO.DefineVariable<float>(
            "h5Floats", {size * Nx}, {rank * Nx}, {Nx}, adios2::ConstantDims);

        adios2::Variable<int> h5Ints = hdf5IO.DefineVariable<int>(
            "h5Ints", {size * Nx}, {rank * Nx}, {Nx}, adios2::ConstantDims);

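        // Scalar variable: defined without shape/start/count, it holds a
        // single value rather than a distributed array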
        adios2::Variable<double> h5ScalarDouble =
            hdf5IO.DefineVariable<double>("h5ScalarDouble");

        /** Engine derived class, spawned to start IO operations */
        adios2::Engine hdf5Writer =
            hdf5IO.Open("myVector.h5", adios2::Mode::Write);
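        // Two write paths follow: with ALL_RANKS_WRITE defined, every rank
        // calls Put(); otherwise the collective BeginStep()/EndStep() pair
        // drives the collective HDF5 calls and ranks without data may skip Put()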
#ifdef ALL_RANKS_WRITE
        // all ranks must call Put()
        /** Write variable for buffering */
        hdf5Writer.Put<float>(h5Floats, myFloats.data());
        hdf5Writer.Put(h5Ints, myInts.data());
        hdf5Writer.Put(h5ScalarDouble, &myScalar);
#else
        // use the collective BeginStep()/EndStep() to run the collective HDF5
        // calls; ranks with no data to present can skip writing
        hdf5Writer.BeginStep();
        if (rank == 0)
        {
            hdf5Writer.Put<float>(h5Floats, myFloats.data());
            hdf5Writer.Put(h5Ints, myInts.data());
            hdf5Writer.Put(h5ScalarDouble, &myScalar);
        }
        hdf5Writer.EndStep();
#endif
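        // Attributes: record the ADIOS2 schema version and an explicit mesh
        // description built from the global dimensions below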
        std::vector<int64_t> m_globalDims = {10, 20, 30, 40};
        hdf5IO.DefineAttribute<std::string>(
            "adios2_schema/version_major",
            std::to_string(ADIOS2_VERSION_MAJOR));
        hdf5IO.DefineAttribute<std::string>(
            "adios2_schema/version_minor",
            std::to_string(ADIOS2_VERSION_MINOR));
        hdf5IO.DefineAttribute<std::string>("/adios2_schema/mesh/type",
                                            "explicit");
        hdf5IO.DefineAttribute<std::int64_t>("adios2_schema/mesh/dimension0",
                                             m_globalDims[0]);
        hdf5IO.DefineAttribute<std::int64_t>("adios2_schema/mesh/dimension1",
                                             m_globalDims[1]);
        hdf5IO.DefineAttribute<std::int64_t>("adios2_schema/mesh/dimension2",
                                             m_globalDims[2]);
        hdf5IO.DefineAttribute<std::int64_t>("adios2_schema/mesh/dimension3",
                                             m_globalDims[3]);
        hdf5IO.DefineAttribute<std::int64_t>("adios2_schema/mesh/dimension-num",
                                             m_globalDims.size());

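        // Close the engine, finalizing all writes to the HDF5 file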
        hdf5Writer.Close();
    }
    catch (std::invalid_argument &e)
    {
        std::cout << "Invalid argument exception, STOPPING PROGRAM from rank "
                  << rank << "\n";
        std::cout << e.what() << "\n";
    }
    catch (std::ios_base::failure &e)
    {
        std::cout
            << "IO System base failure exception, STOPPING PROGRAM from rank "
            << rank << "\n";
        std::cout << e.what() << "\n";
    }
    catch (std::exception &e)
    {
        std::cout << "Exception, STOPPING PROGRAM from rank " << rank << "\n";
        std::cout << e.what() << "\n";
    }

    MPI_Finalize();

    return 0;
}