//                       MFEM Example 6 - Parallel Version
//                              PUMI Modification
//
// Compile with: make ex6p
//
// Sample runs:  mpirun -np 8 ex6p
//
// Description:  This is a version of Example 1 with a simple adaptive mesh
//               refinement loop. The problem being solved is again the Laplace
//               equation -Delta u = 1 with homogeneous Dirichlet boundary
//               conditions. The problem is solved on a sequence of meshes that
//               are adapted in a conforming (tetrahedral) manner according to
//               a simple SPR ZZ error estimator.
//
//               This PUMI variation also performs a "uniform" refinement for
//               coarse meshes, similar to the other MFEM examples; however,
//               the refinement is performed using the PUMI API. A new option
//               "-ar" modifies the "adapt_ratio", the fraction of allowable
//               error that scales the output size field of the error
//               estimator.
//
// NOTE:         Model/Mesh files for this example are in the (large) data file
//               repository of MFEM at https://github.com/mfem/data under the
//               folder named "pumi", which consists of the following sub-folders:
//               a) geom     --> model files
//               b) parallel --> parallel pumi mesh files
//               c) serial   --> serial pumi mesh files

#include "mfem.hpp"
#include <fstream>
#include <iostream>

#ifdef MFEM_USE_SIMMETRIX
#include <SimUtil.h>
#include <gmi_sim.h>
#endif
#include <apfMDS.h>
#include <gmi_null.h>
#include <PCU.h>
#include <spr.h>
#include <apfConvert.h>
#include <gmi_mesh.h>
#include <crv.h>

#ifndef MFEM_USE_PUMI
#error This example requires that MFEM is built with MFEM_USE_PUMI=YES
#endif

using namespace std;
using namespace mfem;

int main(int argc, char *argv[])
{
   // 1. Initialize MPI.
   int num_procs, myid;
   MPI_Init(&argc, &argv);
   MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
   MPI_Comm_rank(MPI_COMM_WORLD, &myid);

   // 2. Parse command-line options.
   const char *mesh_file = "../../data/pumi/parallel/Kova/Kova100k_8.smb";
#ifdef MFEM_USE_SIMMETRIX
   const char *model_file = "../../data/pumi/geom/Kova.x_t";
   const char *smd_file = NULL;
#else
   const char *model_file = "../../data/pumi/geom/Kova.dmg";
#endif
   int order = 1;
   bool static_cond = false;
   bool visualization = true;
   int geom_order = 1;
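   // Fraction of the allowable error used to scale the size field produced by
   // the SPR error estimator (adjustable with "-ar" below).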
   double adapt_ratio = 0.05;

   OptionsParser args(argc, argv);
   args.AddOption(&mesh_file, "-m", "--mesh",
                  "Mesh file to use.");
   args.AddOption(&order, "-o", "--order",
                  "Finite element order (polynomial degree) or -1 for"
                  " isoparametric space.");
   args.AddOption(&static_cond, "-sc", "--static-condensation", "-no-sc",
                  "--no-static-condensation", "Enable static condensation.");
   args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
                  "--no-visualization",
                  "Enable or disable GLVis visualization.");
   args.AddOption(&model_file, "-p", "--model",
                  "Parasolid or .dmg model to use.");
#ifdef MFEM_USE_SIMMETRIX
   args.AddOption(&smd_file, "-sm", "--smd_model",
                  "smd model file to use.");
#endif
   args.AddOption(&geom_order, "-go", "--geometry_order",
                  "Geometric order of the model.");
   args.AddOption(&adapt_ratio, "-ar", "--adapt_ratio",
                  "Adaptation factor used in MeshAdapt.");
   args.Parse();
   if (!args.Good())
   {
      if (myid == 0)
      {
         args.PrintUsage(cout);
      }
      MPI_Finalize();
      return 1;
   }
   if (myid == 0)
   {
      args.PrintOptions(cout);
   }

   // 3. Read the SCOREC Mesh.
   PCU_Comm_Init();
#ifdef MFEM_USE_SIMMETRIX
   Sim_readLicenseFile(0);
   gmi_sim_start();
   gmi_register_sim();
#endif
   gmi_register_mesh();

   apf::Mesh2* pumi_mesh;
#ifdef MFEM_USE_SIMMETRIX
   if (smd_file)
   {
      gmi_model *mixed_model = gmi_sim_load(model_file, smd_file);
      pumi_mesh = apf::loadMdsMesh(mixed_model, mesh_file);
   }
   else
#endif
   {
      pumi_mesh = apf::loadMdsMesh(model_file, mesh_file);
   }

   // 4. Increase the geometry order and refine the mesh if necessary. Parallel
   //    uniform refinement is performed if the total number of elements is
   //    less than 100,000.
   int dim = pumi_mesh->getDimension();
   int nEle = pumi_mesh->count(dim);
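   // Each uniform refinement multiplies the element count by roughly 2^dim,
   // so the number of levels needed to reach ~100,000 elements is about
   // log2(100000/nEle)/dim.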
   int ref_levels = (int)floor(log(100000./nEle)/log(2.)/dim);

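   // For higher-order geometry, curve the mesh to Bezier shapes of the
   // requested order before refining.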
   if (geom_order > 1)
   {
      crv::BezierCurver bc(pumi_mesh, geom_order, 2);
      bc.run();
   }

   // Perform uniform refinement.
   if (myid == 0)
   {
      std::cout << " ref level : " << ref_levels << std::endl;
   }

   if (ref_levels > 1)
   {
      ma::Input* uniInput = ma::configureUniformRefine(pumi_mesh, ref_levels);

      if (geom_order > 1)
      {
         crv::adapt(uniInput);
      }
      else
      {
         ma::adapt(uniInput);
      }
   }

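   // Sanity-check the refined PUMI mesh before converting it to MFEM.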
   pumi_mesh->verify();

   // 5. Create the parallel MFEM mesh object from the parallel PUMI mesh. We
   //    can handle triangular and tetrahedral meshes. Note that mesh
   //    refinement is performed on the PUMI mesh.
   ParMesh *pmesh = new ParPumiMesh(MPI_COMM_WORLD, pumi_mesh);

   // 6. Define a parallel finite element space on the parallel mesh. Here we
   //    use continuous Lagrange finite elements of the specified order. If
   //    order < 1, we instead use an isoparametric/isogeometric space.
   FiniteElementCollection *fec;
   if (order > 0)
   {
      fec = new H1_FECollection(order, dim);
   }
   else if (pmesh->GetNodes())
   {
      fec = pmesh->GetNodes()->OwnFEC();
      if (myid == 0)
      {
         cout << "Using isoparametric FEs: " << fec->Name() << endl;
      }
   }
   else
   {
      fec = new H1_FECollection(order = 1, dim);
   }
   ParFiniteElementSpace *fespace = new ParFiniteElementSpace(pmesh, fec);
   HYPRE_BigInt size = fespace->GlobalTrueVSize();
   if (myid == 0)
   {
      cout << "Number of finite element unknowns: " << size << endl;
   }

   // 7. Set up the parallel linear form b(.) which corresponds to the
   //    right-hand side of the FEM linear system, in this case (1,phi_i),
   //    where the phi_i are the basis functions of fespace.
   ParLinearForm *b = new ParLinearForm(fespace);
   ConstantCoefficient one(1.0);
   b->AddDomainIntegrator(new DomainLFIntegrator(one));

   // 8. Define the solution vector x as a parallel finite element grid function
   //    corresponding to fespace. Initialize x with an initial guess of zero,
   //    which satisfies the boundary conditions.
   ParGridFunction x(fespace);
   x = 0.0;

   // 9. Connect to GLVis.
   char vishost[] = "localhost";
   int  visport   = 19916;

   socketstream sout;
   if (visualization)
   {
      sout.open(vishost, visport);
      if (!sout)
      {
         if (myid == 0)
         {
            cout << "Unable to connect to GLVis server at "
                 << vishost << ':' << visport << endl;
            cout << "GLVis visualization disabled.\n";
         }
         visualization = false;
      }

      sout.precision(8);
   }

   // 10. Set up the parallel bilinear form a(.,.) on the finite element space
   //     corresponding to the Laplacian operator -Delta, by adding the
   //     Diffusion domain integrator.
   ParBilinearForm *a = new ParBilinearForm(fespace);
   a->AddDomainIntegrator(new DiffusionIntegrator(one));

   // 11. Assemble the parallel bilinear form and the corresponding linear
   //     system, applying any necessary transformations such as: parallel
   //     assembly, eliminating boundary conditions, applying conforming
   //     constraints for non-conforming AMR, static condensation, etc.
   if (static_cond) { a->EnableStaticCondensation(); }
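   // Note: the actual assembly and solve are repeated inside the AMR loop
   // below, once per adapted mesh.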

   // 12. The main AMR loop. In each iteration we solve the problem on the
   //     current mesh, visualize the solution, and adapt the mesh.
   apf::Field* Tmag_field = 0;
   apf::Field* temp_field = 0;
   apf::Field* ipfield = 0;
   apf::Field* sizefield = 0;
   int max_iter = 3;

   for (int Itr = 0; Itr < max_iter; Itr++)
   {
      HYPRE_BigInt global_dofs = fespace->GlobalTrueVSize();
      if (myid == 0)
      {
         cout << "\nAMR iteration " << Itr << endl;
         cout << "Number of unknowns: " << global_dofs << endl;
      }

      // Assemble.
      a->Assemble();
      b->Assemble();

      // Essential boundary condition.
      Array<int> ess_tdof_list;
      if (pmesh->bdr_attributes.Size())
      {
         Array<int> ess_bdr(pmesh->bdr_attributes.Max());
         ess_bdr = 1;
         fespace->GetEssentialTrueDofs(ess_bdr, ess_tdof_list);
      }

      // Form the linear system.
      HypreParMatrix A;
      Vector B, X;
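      // With copy_interior = 1, FormLinearSystem keeps the current interior
      // values of x in X as the initial guess, instead of zeroing them.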
      const int copy_interior = 1;
      a->FormLinearSystem(ess_tdof_list, x, *b, A, X, B, copy_interior);

      // 13. Define and apply a parallel PCG solver for AX=B with the BoomerAMG
      //     preconditioner from hypre.
      HypreBoomerAMG amg;
      amg.SetPrintLevel(0);
      CGSolver pcg(A.GetComm());
      pcg.SetPreconditioner(amg);
      pcg.SetOperator(A);
      pcg.SetRelTol(1e-6);
      pcg.SetMaxIter(200);
      pcg.SetPrintLevel(3); // print the first and the last iterations only
      pcg.Mult(B, X);

      // 14. Recover the parallel grid function corresponding to X. This is the
      //     local finite element solution on each processor.
      a->RecoverFEMSolution(X, *b, x);

      // 15. Save the refined mesh and the solution in parallel. This output
      //     can be viewed later using GLVis: "glvis -np <np> -m mesh -g sol".
      {
         ostringstream mesh_name, sol_name;
         mesh_name << "mesh." << setfill('0') << setw(6) << myid;
         sol_name << "sol." << setfill('0') << setw(6) << myid;

         ofstream mesh_ofs(mesh_name.str().c_str());
         mesh_ofs.precision(8);
         pmesh->Print(mesh_ofs);

         ofstream sol_ofs(sol_name.str().c_str());
         sol_ofs.precision(8);
         x.Save(sol_ofs);
      }

      // 16. Send the above data by socket to a GLVis server.
      if (visualization)
      {
         sout << "parallel " << num_procs << " " << myid << "\n";
         sout << "solution\n" << *pmesh << x << flush;
      }

      // 17. Field transfer. The scalar solution field and a magnitude field
      //     for error estimation are created on the PUMI mesh.
      if (order > geom_order)
      {
         Tmag_field = apf::createField(pumi_mesh, "field_mag",
                                       apf::SCALAR, apf::getLagrange(order));
         temp_field = apf::createField(pumi_mesh, "T_field",
                                       apf::SCALAR, apf::getLagrange(order));
      }
      else
      {
         Tmag_field = apf::createFieldOn(pumi_mesh, "field_mag", apf::SCALAR);
         temp_field = apf::createFieldOn(pumi_mesh, "T_field", apf::SCALAR);
      }

      ParPumiMesh* pPPmesh = dynamic_cast<ParPumiMesh*>(pmesh);
      pPPmesh->FieldMFEMtoPUMI(pumi_mesh, &x, temp_field, Tmag_field);

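      // SPR error estimation: sample the solution gradient at integration
      // points, then recover a smooth field and convert the estimated error
      // into a target mesh size field, scaled by adapt_ratio.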
      ipfield = spr::getGradIPField(Tmag_field, "MFEM_gradip", 2);
      sizefield = spr::getSPRSizeField(ipfield, adapt_ratio);

      apf::destroyField(Tmag_field);
      apf::destroyField(ipfield);

      // 18. Perform MeshAdapt.
      ma::Input* erinput = ma::configure(pumi_mesh, sizefield);
      erinput->shouldFixShape = true;
      erinput->maximumIterations = 2;
      if (geom_order > 1)
      {
         crv::adapt(erinput);
      }
      else
      {
         ma::adapt(erinput);
      }

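      // Rebuild the MFEM mesh from the adapted PUMI mesh; UpdateMesh transfers
      // the new mesh data into pmesh, so the temporary object can be freed.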
      ParMesh* Adapmesh = new ParPumiMesh(MPI_COMM_WORLD, pumi_mesh);
      pPPmesh->UpdateMesh(Adapmesh);
      delete Adapmesh;

      // 19. Update the FiniteElementSpace, GridFunction, and bilinear form.
      fespace->Update();
      x.Update();
      x = 0.0;

      pPPmesh->FieldPUMItoMFEM(pumi_mesh, temp_field, &x);
      a->Update();
      b->Update();

      // Destroy fields.
      apf::destroyField(temp_field);
      apf::destroyField(sizefield);
   }

   // 20. Free the used memory.
   delete a;
   delete b;
   delete fespace;
   if (order > 0) { delete fec; }
   delete pmesh;

   pumi_mesh->destroyNative();
   apf::destroyMesh(pumi_mesh);
   PCU_Comm_Free();

#ifdef MFEM_USE_SIMMETRIX
   gmi_sim_stop();
   Sim_unregisterAllKeys();
#endif

   MPI_Finalize();

   return 0;
}