1 //***********************************************************************
2 //
3 // Example program how to use ALUGrid.
4 // Author: Robert Kloefkorn
5 //
// This little program reads one of the macrogrids and generates a grid.
// The grid is refined and then coarsened again.
8 //
9 //***********************************************************************
10
11 #include <config.h>
12 #include <iostream>
13
14 // include serial part of ALUGrid
15 #include <dune/alugrid/grid.hh>
16
17 using namespace ALUGrid;
18 using namespace std;
19
20 typedef Gitter::AdaptRestrictProlong AdaptRestrictProlongType;
21
22 typedef Gitter::helement_STI HElemType; // Interface Element
23 typedef Gitter::hface_STI HFaceType; // Interface Element
24 typedef Gitter::hedge_STI HEdgeType; // Interface Element
25 typedef Gitter::vertex_STI HVertexType; // Interface Element
26 typedef Gitter::hbndseg HGhostType;
27
28 #if HAVE_MPI
29 #define PARALLEL 1
30 #warning RUNNING PARALLEL VERSION
31 #else
32 #define PARALLEL 0
33 #endif
34
35
36 typedef GitterDunePll GitterType;
37
38 // refine grid globally, i.e. mark all elements and then call adapt
globalRefine(GitterType * grid,int refcount,int rank)39 void globalRefine(GitterType* grid, int refcount, int rank) {
40
41 for (int count=refcount ; count > 0; count--) {
42 cout << "Refine global: run " << refcount-count << endl;
43 {
44 // get LeafIterator which iterates over all leaf elements of the grid
45 LeafIterator < Gitter::helement_STI > w (*grid) ;
46 cout << "we have " << w->size() << " elements on rank = " << rank << "\n";
47
48 if( rank == 0 )
49 {
50 for (w->first () ; ! w->done () ; w->next ())
51 {
52 // mark element for refinement
53 w->item ().tagForGlobalRefinement ();
54 }
55 }
56 }
57 // adapt grid
58 grid->adapt();
59 grid->duneLoadBalance();
60
61 // print size of grid
62 //grid->printsize () ;
63 }
64 }
65
66 // coarse grid globally, i.e. mark all elements for coarsening
67 // and then call adapt
globalCoarsening(GitterType * grid,int refcount,int rank)68 void globalCoarsening(GitterType * grid, int refcount, int rank) {
69
70 for (int count=refcount ; count > 0; count--) {
71 cout << "Global Coarsening: run " << refcount-count << endl;
72 {
73 // get LeafIterator which iterates over all leaf elements of the grid
74 LeafIterator < Gitter::helement_STI > w (*grid) ;
75
76 for (w->first () ; ! w->done () ; w->next ())
77 {
78 // mark elements for coarsening
79 w->item ().tagForGlobalCoarsening() ;
80 }
81 }
82 // adapt grid
83 grid->adapt ();
84
85 // print size of grid
86 grid->printsize () ;
87 }
88
89 {
90 // get LeafIterator which iterates over all leaf elements of the grid
91 LeafIterator < Gitter::helement_STI > w (*grid) ;
92 cout << "we have " << w->size() << " elements on rank = " << rank << "\n";
93 }
94 }
95
96 // perform walk over elements of a certain level
levelwalk(GitterType * grid,int level)97 void levelwalk(GitterType * grid, int level) {
98 typedef Insert <AccessIterator <
99 Gitter::helement_STI>::Handle,
100 TreeIterator <Gitter :: helement_STI, any_has_level <Gitter::helement_STI> > >
101 LevelIterator;
102
103 LevelIterator it (grid->container(), level);
104 int i = 0;
105 for (it.first(); !it.done(); it.next())
106 {
107 cout << "Element " << it.item().getIndex() << " has " << i++ << " as level index " << endl;
108 }
109 cout << endl;
110 }
111
112
// Example: read a macro grid, refine it globally, and print its size again.
main(int argc,char ** argv,const char ** envp)114 int main (int argc, char ** argv, const char ** envp)
115 {
116 MPI_Init(&argc,&argv);
117 int myrank = -1;
118
119 MPI_Comm_rank(MPI_COMM_WORLD,&myrank);
120
121 {
122 int mxl = 3;
123 if (argc < 2)
124 {
125 cout << "usage: "<< argv[0] << " <macro grid> <opt: level> \n";
126 abort();
127 }
128
129 if (argc < 3)
130 cout << "Default level = "<< mxl << "choosen! \n";
131 else
132 mxl = atoi(argv[2]);
133
134 std::string macroname( argv[1] );
135
136 /*
137 cout << "\n-----------------------------------------------\n";
138 cout << "read macro grid from < " << macroname << " > !" << endl;
139 cout << "-----------------------------------------------\n";
140 */
141
142 MpAccessMPI mpAccess(MPI_COMM_WORLD);
143 GitterDunePll grid(macroname.c_str(),mpAccess);
144 //GitterDunePll grid(macroname.c_str(),mpAccess);
145 cout << "Grid generated! \n";
146 grid.duneLoadBalance();
147
148 grid.printsize();
149 cout << "---------------------------------------------\n";
150
151 globalRefine(&grid, mxl, myrank);
152
153 grid.duneLoadBalance();
154
155 grid.printsize();
156 cout << "---------------------------------------------\n";
157
158 globalCoarsening(&grid, mxl, myrank );
159 grid.printsize();
160 }
161
162 MPI_Finalize();
163 return 0;
164 }
165
166