// MFEM Example 1 - Parallel Version
// PUMI Modification
//
// Compile with: make ex1p
//
// Sample runs:
//    mpirun -np 8 ex1p -m ../../data/pumi/parallel/Kova/Kova100k_8.smb
//                      -p ../../data/pumi/geom/Kova.dmg -o 1 -go 2
//
// Note:         Example models and meshes for the PUMI examples can be
//               downloaded from github.com/mfem/data/pumi. After downloading,
//               we recommend creating a symbolic link to the above directory
//               in ../../data.
//
// Description:  This example code demonstrates the use of MFEM to define a
//               simple finite element discretization of the Laplace problem
//               -Delta u = 1 with homogeneous Dirichlet boundary conditions.
//               Specifically, we discretize using a FE space of the specified
//               order, or if order < 1 using an isoparametric/isogeometric
//               space (i.e. quadratic for quadratic curvilinear mesh, NURBS
//               for NURBS mesh, etc.).
//
//               The example highlights the use of mesh refinement, finite
//               element grid functions, as well as linear and bilinear forms
//               corresponding to the left-hand side and right-hand side of the
//               discrete linear system. We also cover the explicit elimination
//               of essential boundary conditions, static condensation, and the
//               optional connection to the GLVis tool for visualization.
//
//               This PUMI modification demonstrates how PUMI's API can be used
//               to load a parallel PUMI mesh classified on a geometric model
//               and then generate the corresponding parallel MFEM mesh. The
//               example also performs a "uniform" refinement, similar to the
//               MFEM examples, for coarse meshes. However, the refinement is
//               performed using the PUMI API. The inputs are a Parasolid
//               model, "*.xmt_txt", and SCOREC parallel meshes, "*.smb". The
//               option "-o" is used for the finite element order and "-go"
//               for the geometry order. Note that they can be used
//               independently: "-o 8 -go 3" solves for 8th order FE on third
//               order geometry.
//
// NOTE:         Model/Mesh files for this example are in the (large) data file
//               repository of MFEM here https://github.com/mfem/data under the
//               folder named "pumi", which consists of the following
//               sub-folders:
//               a) geom     --> model files
//               b) parallel --> parallel pumi mesh files
//               c) serial   --> serial pumi mesh files


#include "mfem.hpp"
#include <fstream>
#include <iostream>

#ifdef MFEM_USE_SIMMETRIX
#include <SimUtil.h>
#include <gmi_sim.h>
#endif
#include <apfMDS.h>
#include <gmi_null.h>
#include <PCU.h>
#include <apfConvert.h>
#include <gmi_mesh.h>
#include <crv.h>

#ifndef MFEM_USE_PUMI
#error This example requires that MFEM is built with MFEM_USE_PUMI=YES
#endif

using namespace std;
using namespace mfem;

int main(int argc, char *argv[])
{
   // 1. Initialize MPI.
   int num_procs, myid;
   MPI_Init(&argc, &argv);
   MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
   MPI_Comm_rank(MPI_COMM_WORLD, &myid);

   // 2. Parse command-line options.
   const char *mesh_file = "../../data/pumi/parallel/Kova/Kova100k_8.smb";
#ifdef MFEM_USE_SIMMETRIX
   const char *model_file = "../../data/pumi/geom/Kova.x_t";
#else
   const char *model_file = "../../data/pumi/geom/Kova.dmg";
#endif
   int order = 1;
   bool static_cond = false;
   bool visualization = true;
   int geom_order = 1;

   OptionsParser args(argc, argv);
   args.AddOption(&mesh_file, "-m", "--mesh",
                  "Mesh file to use.");
   args.AddOption(&order, "-o", "--order",
                  "Finite element order (polynomial degree) or -1 for"
                  " isoparametric space.");
   args.AddOption(&static_cond, "-sc", "--static-condensation", "-no-sc",
                  "--no-static-condensation", "Enable static condensation.");
   args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
                  "--no-visualization",
                  "Enable or disable GLVis visualization.");
   args.AddOption(&model_file, "-p", "--parasolid",
                  "Parasolid model to use.");
   args.AddOption(&geom_order, "-go", "--geometry_order",
                  "Geometric order of the model.");
   args.Parse();
   if (!args.Good())
   {
      if (myid == 0)
      {
         args.PrintUsage(cout);
      }
      MPI_Finalize();
      return 1;
   }
   if (myid == 0)
   {
      args.PrintOptions(cout);
   }

   // 3. Read the SCOREC Mesh.
   PCU_Comm_Init();
#ifdef MFEM_USE_SIMMETRIX
   Sim_readLicenseFile(0);
   gmi_sim_start();
   gmi_register_sim();
#endif
   gmi_register_mesh();

   apf::Mesh2* pumi_mesh = apf::loadMdsMesh(model_file, mesh_file);

   // 4. Increase the geometry order and refine the mesh if necessary. Parallel
   //    uniform refinement is performed if the total number of elements is
   //    less than 10,000.
   int dim = pumi_mesh->getDimension();
   int nEle = pumi_mesh->count(dim);
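   // Each level of uniform refinement multiplies the element count by roughly
   // 2^dim, so the number of levels below is chosen to bring the total close
   // to 10,000 elements.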
   int ref_levels = (int)floor(log(10000./nEle)/log(2.)/dim);

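   // If a higher geometric order was requested, curve the straight-sided mesh
   // using Bezier shapes of that order via PUMI's crv tools.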
   if (geom_order > 1)
   {
      crv::BezierCurver bc(pumi_mesh, geom_order, 2);
      bc.run();
   }

   // Perform uniform refinement.
   if (ref_levels > 1)
   {
      ma::Input* uniInput = ma::configureUniformRefine(pumi_mesh, ref_levels);

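      // crv::adapt preserves the curved (Bezier) geometry during refinement;
      // ma::adapt handles the straight-sided case.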
      if (geom_order > 1)
      {
         crv::adapt(uniInput);
      }
      else
      {
         ma::adapt(uniInput);
      }
   }

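   // Sanity-check the (possibly refined) PUMI mesh.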
   pumi_mesh->verify();

   // 5. Create the parallel MFEM mesh object from the parallel PUMI mesh.
   //    We can handle triangular and tetrahedral meshes. Note that the mesh
   //    refinement above was performed on the PUMI mesh.
   ParMesh *pmesh = new ParPumiMesh(MPI_COMM_WORLD, pumi_mesh);
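   // (ParPumiMesh is derived from mfem::ParMesh, so pmesh can be used
   // anywhere a ParMesh is expected.)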

   // 6. Define a parallel finite element space on the parallel mesh. Here we
   //    use continuous Lagrange finite elements of the specified order. If
   //    order < 1, we instead use an isoparametric/isogeometric space.
   FiniteElementCollection *fec;
   if (order > 0)
   {
      fec = new H1_FECollection(order, dim);
   }
   else if (pmesh->GetNodes())
   {
      fec = pmesh->GetNodes()->OwnFEC();
      if (myid == 0)
      {
         cout << "Using isoparametric FEs: " << fec->Name() << endl;
      }
   }
   else
   {
      fec = new H1_FECollection(order = 1, dim);
   }
   ParFiniteElementSpace *fespace = new ParFiniteElementSpace(pmesh, fec);
   HYPRE_BigInt size = fespace->GlobalTrueVSize();
   if (myid == 0)
   {
      cout << "Number of finite element unknowns: " << size << endl;
   }

   // 7. Determine the list of true (i.e. parallel conforming) essential
   //    boundary dofs. In this example, the boundary conditions are defined
   //    by marking all the boundary attributes from the mesh as essential
   //    (Dirichlet) and converting them to a list of true dofs.
   Array<int> ess_tdof_list;
   if (pmesh->bdr_attributes.Size())
   {
      Array<int> ess_bdr(pmesh->bdr_attributes.Max());
      ess_bdr = 1;
      fespace->GetEssentialTrueDofs(ess_bdr, ess_tdof_list);
   }
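   // To impose Dirichlet conditions on only part of the boundary instead, one
   // could mark individual attributes (a sketch; the attribute numbering
   // below is hypothetical and depends on the model):
   //
   //    ess_bdr = 0;
   //    ess_bdr[0] = 1;  // essential BCs on attribute 1 only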

   // 8. Set up the parallel linear form b(.) which corresponds to the
   //    right-hand side of the FEM linear system, which in this case is
   //    (1,phi_i) where phi_i are the basis functions in fespace.
   ParLinearForm *b = new ParLinearForm(fespace);
   ConstantCoefficient one(1.0);
   b->AddDomainIntegrator(new DomainLFIntegrator(one));
   b->Assemble();

   // 9. Define the solution vector x as a parallel finite element grid
   //    function corresponding to fespace. Initialize x with initial guess of
   //    zero, which satisfies the boundary conditions.
   ParGridFunction x(fespace);
   x = 0.0;

   // 10. Set up the parallel bilinear form a(.,.) on the finite element space
   //     corresponding to the Laplacian operator -Delta, by adding the
   //     Diffusion domain integrator.
   ParBilinearForm *a = new ParBilinearForm(fespace);
   a->AddDomainIntegrator(new DiffusionIntegrator(one));

   // 11. Assemble the parallel bilinear form and the corresponding linear
   //     system, applying any necessary transformations such as: parallel
   //     assembly, eliminating boundary conditions, applying conforming
   //     constraints for non-conforming AMR, static condensation, etc.
   if (static_cond) { a->EnableStaticCondensation(); }
   a->Assemble();

   HypreParMatrix A;
   Vector B, X;
   a->FormLinearSystem(ess_tdof_list, x, *b, A, X, B);

   if (myid == 0)
   {
      cout << "Size of linear system: " << A.GetGlobalNumRows() << endl;
   }

   // 12. Define and apply a parallel PCG solver for AX=B with the BoomerAMG
   //     preconditioner from hypre.
   HypreSolver *amg = new HypreBoomerAMG(A);
   HyprePCG *pcg = new HyprePCG(A);
   pcg->SetTol(1e-12);
   pcg->SetMaxIter(200);
   pcg->SetPrintLevel(2);
   pcg->SetPreconditioner(*amg);
   pcg->Mult(B, X);
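   // The iteration count can be queried after the solve (a sketch using
   // HyprePCG's accessor; uncomment to use):
   //
   //    int its; pcg->GetNumIterations(its);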

   // 13. Recover the parallel grid function corresponding to X. This is the
   //     local finite element solution on each processor.
   a->RecoverFEMSolution(X, *b, x);

   // 14. Save the refined mesh and the solution in parallel. This output can
   //     be viewed later using GLVis: "glvis -np <np> -m mesh -g sol".
   {
      ostringstream mesh_name, sol_name;
      mesh_name << "mesh." << setfill('0') << setw(6) << myid;
      sol_name << "sol." << setfill('0') << setw(6) << myid;

      ofstream mesh_ofs(mesh_name.str().c_str());
      mesh_ofs.precision(8);
      pmesh->Print(mesh_ofs);

      ofstream sol_ofs(sol_name.str().c_str());
      sol_ofs.precision(8);
      x.Save(sol_ofs);
   }

   // 15. Send the solution by socket to a GLVis server.
   if (visualization)
   {
      char vishost[] = "localhost";
      int  visport   = 19916;
      socketstream sol_sock(vishost, visport);
      sol_sock << "parallel " << num_procs << " " << myid << "\n";
      sol_sock.precision(8);
      sol_sock << "solution\n" << *pmesh << x << flush;
   }

   // 16. Free the used memory.
   delete pcg;
   delete amg;
   delete a;
   delete b;
   delete fespace;
   if (order > 0) { delete fec; }
   delete pmesh;

   pumi_mesh->destroyNative();
   apf::destroyMesh(pumi_mesh);
   PCU_Comm_Free();

#ifdef MFEM_USE_SIMMETRIX
   gmi_sim_stop();
   Sim_unregisterAllKeys();
#endif

   MPI_Finalize();

   return 0;
}