//                       MFEM Example 6 - Parallel Version
//                              PETSc Modification
//
// Compile with: make ex6p
//
// Sample runs:
//    mpirun -np 4 ex6p -m ../../data/amr-quad.mesh
//    mpirun -np 4 ex6p -m ../../data/amr-quad.mesh -nonoverlapping
//
// Description:  This is a version of Example 1 with a simple adaptive mesh
//               refinement loop. The problem being solved is again the Laplace
//               equation -Delta u = 1 with homogeneous Dirichlet boundary
//               conditions. The problem is solved on a sequence of meshes which
//               are locally refined in a conforming (triangles, tetrahedrons)
//               or non-conforming (quadrilaterals, hexahedra) manner according
//               to a simple ZZ error estimator.
//
//               The example demonstrates MFEM's capability to work with both
//               conforming and nonconforming refinements, in 2D and 3D, on
//               linear, curved and surface meshes. Interpolation of functions
//               from coarse to fine meshes, as well as persistent GLVis
//               visualization are also illustrated.
//
//               PETSc assembly timings can be benchmarked if requested from
//               the command line.
//
//               We recommend viewing Example 1 before viewing this example.

#include "mfem.hpp"
#include <fstream>
#include <iostream>

#ifndef MFEM_USE_PETSC
#error This example requires that MFEM is built with MFEM_USE_PETSC=YES
#endif

using namespace std;
using namespace mfem;

int main(int argc, char *argv[])
{
   // 1. Initialize MPI.
   int num_procs, myid;
   MPI_Init(&argc, &argv);
   MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
   MPI_Comm_rank(MPI_COMM_WORLD, &myid);

   // 2. Parse command-line options.
   const char *mesh_file = "../../data/star.mesh";
   int order = 1;
   bool visualization = true;
   int max_dofs = 100000;
   bool use_petsc = true;
   const char *petscrc_file = "";
   bool use_nonoverlapping = false;

   OptionsParser args(argc, argv);
   args.AddOption(&mesh_file, "-m", "--mesh",
                  "Mesh file to use.");
   args.AddOption(&order, "-o", "--order",
                  "Finite element order (polynomial degree).");
   args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
                  "--no-visualization",
                  "Enable or disable GLVis visualization.");
   args.AddOption(&max_dofs, "-md", "--max_dofs",
                  "Maximum number of dofs.");
   args.AddOption(&use_petsc, "-usepetsc", "--usepetsc", "-no-petsc",
                  "--no-petsc",
                  "Whether or not to use PETSc to solve the linear system.");
   args.AddOption(&petscrc_file, "-petscopts", "--petscopts",
                  "PetscOptions file to use.");
   args.AddOption(&use_nonoverlapping, "-nonoverlapping", "--nonoverlapping",
                  "-no-nonoverlapping", "--no-nonoverlapping",
                  "Whether or not to use PETSc's block-diagonal matrix format "
                  "for non-overlapping domain decomposition.");
   args.Parse();
   if (!args.Good())
   {
      if (myid == 0)
      {
         args.PrintUsage(cout);
      }
      MPI_Finalize();
      return 1;
   }
   if (myid == 0)
   {
      args.PrintOptions(cout);
   }
   // 2b. Initialize PETSc, passing to it the (optional) PETSc options file
   //     given on the command line.
   if (use_petsc) { MFEMInitializePetsc(NULL,NULL,petscrc_file,NULL); }

   // 3. Read the (serial) mesh from the given mesh file on all processors.  We
   //    can handle triangular, quadrilateral, tetrahedral, hexahedral, surface
   //    and volume meshes with the same code.
   Mesh *mesh = new Mesh(mesh_file, 1, 1);
   int dim = mesh->Dimension();
   int sdim = mesh->SpaceDimension();

   // 4. Refine the serial mesh on all processors to increase the resolution.
   //    Also project a NURBS mesh to a piecewise-quadratic curved mesh. Make
   //    sure that the mesh is non-conforming.
   if (mesh->NURBSext)
   {
      mesh->UniformRefinement();
      mesh->SetCurvature(2);
   }
   mesh->EnsureNCMesh();

   // 5. Define a parallel mesh by partitioning the serial mesh.
   //    Once the parallel mesh is defined, the serial mesh can be deleted.
   ParMesh pmesh(MPI_COMM_WORLD, *mesh);
   delete mesh;

   MFEM_VERIFY(pmesh.bdr_attributes.Size() > 0,
               "Boundary attributes required in the mesh.");
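   // Mark all boundary attributes as essential (Dirichlet).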
   Array<int> ess_bdr(pmesh.bdr_attributes.Max());
   ess_bdr = 1;

   // 6. Define a finite element space on the mesh. The polynomial order is
   //    one (linear) by default, but this can be changed on the command line.
   H1_FECollection fec(order, dim);
   ParFiniteElementSpace fespace(&pmesh, &fec);

   // 7. As in Example 1p, we set up bilinear and linear forms corresponding to
   //    the Laplace problem -\Delta u = 1. We don't assemble the discrete
   //    problem yet; this will be done in the main loop.
   ParBilinearForm a(&fespace);
   ParLinearForm b(&fespace);

   ConstantCoefficient one(1.0);

   BilinearFormIntegrator *integ = new DiffusionIntegrator(one);
   a.AddDomainIntegrator(integ);
   b.AddDomainIntegrator(new DomainLFIntegrator(one));

   // 8. The solution vector x and the associated finite element grid function
   //    will be maintained over the AMR iterations. We initialize it to zero.
   ParGridFunction x(&fespace);
   x = 0;

   // 9. Connect to GLVis.
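   //    (By default, a GLVis server listens on localhost:19916.)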
   char vishost[] = "localhost";
   int  visport   = 19916;

   socketstream sout;
   if (visualization)
   {
      sout.open(vishost, visport);
      if (!sout)
      {
         if (myid == 0)
         {
            cout << "Unable to connect to GLVis server at "
                 << vishost << ':' << visport << endl;
            cout << "GLVis visualization disabled.\n";
         }
         visualization = false;
      }

      sout.precision(8);
   }

   // 10. Set up an error estimator. Here we use the Zienkiewicz-Zhu estimator
   //     with L2 projection in the smoothing step to better handle hanging
   //     nodes and parallel partitioning. We need to supply a space for the
   //     discontinuous flux (L2) and a space for the smoothed flux (H(div) is
   //     used here).
   L2_FECollection flux_fec(order, dim);
   ParFiniteElementSpace flux_fes(&pmesh, &flux_fec, sdim);
   RT_FECollection smooth_flux_fec(order-1, dim);
   ParFiniteElementSpace smooth_flux_fes(&pmesh, &smooth_flux_fec);
   // Another possible option for the smoothed flux space:
   // H1_FECollection smooth_flux_fec(order, dim);
   // ParFiniteElementSpace smooth_flux_fes(&pmesh, &smooth_flux_fec, dim);
   L2ZienkiewiczZhuEstimator estimator(*integ, x, flux_fes, smooth_flux_fes);

   // 11. A refiner selects and refines elements based on a refinement strategy.
   //     The strategy here is to refine elements with errors larger than a
   //     fraction of the maximum element error. Other strategies are possible.
   //     The refiner will call the given error estimator.
   ThresholdRefiner refiner(estimator);
   refiner.SetTotalErrorFraction(0.7);

   // 12. The main AMR loop. In each iteration we solve the problem on the
   //     current mesh, visualize the solution, and refine the mesh.
   for (int it = 0; ; it++)
   {
      HYPRE_BigInt global_dofs = fespace.GlobalTrueVSize();
      if (myid == 0)
      {
         cout << "\nAMR iteration " << it << endl;
         cout << "Number of unknowns: " << global_dofs << endl;
      }

      // 13. Assemble the stiffness matrix and the right-hand side. Note that
      //     MFEM doesn't care at this point that the mesh is nonconforming
      //     and parallel. The FE space is considered 'cut' along hanging
      //     edges/faces, and also across processor boundaries.
      a.Assemble();
      b.Assemble();

      // 14. Create the parallel linear system: eliminate boundary conditions,
      //     constrain hanging nodes and nodes across processor boundaries.
      //     The system will be solved for true (unconstrained/unique) DOFs only.
      Array<int> ess_tdof_list;
      fespace.GetEssentialTrueDofs(ess_bdr, ess_tdof_list);
      double time;
      const int copy_interior = 1;
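      // copy_interior = 1 asks FormLinearSystem() to keep the current values
      // of the interior (non-essential) dofs of x in the initial guess,
      // instead of zeroing them.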

      if (use_petsc)
      {
         a.SetOperatorType(use_nonoverlapping ?
                           Operator::PETSC_MATIS : Operator::PETSC_MATAIJ);
         PetscParMatrix pA;
         Vector pX,pB;
         MPI_Barrier(MPI_COMM_WORLD);
         time = -MPI_Wtime();
         a.FormLinearSystem(ess_tdof_list, x, b, pA, pX, pB, copy_interior);
         MPI_Barrier(MPI_COMM_WORLD);
         time += MPI_Wtime();
         if (myid == 0) { cout << "PETSc assembly timing : " << time << endl; }
      }

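      // Assemble again and form the system as a HypreParMatrix: the PETSc
      // assembly above is used only to benchmark the assembly time, while
      // the solve below always uses the hypre operator.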
      a.Assemble();
      b.Assemble();
      a.SetOperatorType(Operator::Hypre_ParCSR);
      HypreParMatrix A;
      Vector B, X;
      MPI_Barrier(MPI_COMM_WORLD);
      time = -MPI_Wtime();
      a.FormLinearSystem(ess_tdof_list, x, b, A, X, B, copy_interior);
      MPI_Barrier(MPI_COMM_WORLD);
      time += MPI_Wtime();
      if (myid == 0) { cout << "HYPRE assembly timing : " << time << endl; }

      // 15. Define and apply a parallel PCG solver for AX=B with the BoomerAMG
      //     preconditioner from hypre.
      HypreBoomerAMG amg;
      amg.SetPrintLevel(0);
      CGSolver pcg(A.GetComm());
      pcg.SetPreconditioner(amg);
      pcg.SetOperator(A);
      pcg.SetRelTol(1e-6);
      pcg.SetMaxIter(200);
      pcg.SetPrintLevel(3); // print the first and the last iterations only
      pcg.Mult(B, X);

      // 16. Extract the parallel grid function corresponding to the finite element
      //     approximation X. This is the local solution on each processor.
      a.RecoverFEMSolution(X, b, x);

      // 17. Send the solution by socket to a GLVis server.
      if (visualization)
      {
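         // The 'parallel' keyword tells GLVis to expect one mesh/solution
         // piece from each of the num_procs MPI ranks.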
         sout << "parallel " << num_procs << " " << myid << "\n";
         sout << "solution\n" << pmesh << x << flush;
      }

      if (global_dofs > max_dofs)
      {
         if (myid == 0)
         {
            cout << "Reached the maximum number of dofs. Stop." << endl;
         }
         // We need to call Update here to delete any internal PETSc objects
         // that have been created by the ParBilinearForm; otherwise, these
         // objects would be destroyed at the end of the main scope, after
         // PETSc has already been finalized.
         a.Update();
         b.Update();
         break;
      }

      // 18. Call the refiner to modify the mesh. The refiner calls the error
      //     estimator to obtain element errors, then it selects elements to be
      //     refined and finally it modifies the mesh. The Stop() method can be
      //     used to determine if a stopping criterion was met.
      refiner.Apply(pmesh);
      if (refiner.Stop())
      {
         if (myid == 0)
         {
            cout << "Stopping criterion satisfied. Stop." << endl;
         }
         a.Update();
         b.Update();
         break;
      }

      // 19. Update the finite element space (recalculate the number of DOFs,
      //     etc.) and create a grid function update matrix. Apply the matrix
      //     to any GridFunctions over the space. In this case, the update
      //     matrix is an interpolation matrix so the updated GridFunction will
      //     still represent the same function as before refinement.
      fespace.Update();
      x.Update();

      // 20. Load balance the mesh, and update the space and solution. Currently
      //     available only for nonconforming meshes.
      if (pmesh.Nonconforming())
      {
         pmesh.Rebalance();

         // Update the space and the GridFunction. This time the update matrix
         // redistributes the GridFunction among the processors.
         fespace.Update();
         x.Update();
      }

      // 21. Also inform the bilinear and linear forms that the space has
      //     changed.
      a.Update();
      b.Update();
   }

   // Finalize PETSc.
   if (use_petsc) { MFEMFinalizePetsc(); }

   MPI_Finalize();
   return 0;
}