/* ---------------------------------------------------------------------
 *
 * Copyright (C) 2020 by the deal.II authors
 *
 * This file is part of the deal.II library.
 *
 * The deal.II library is free software; you can use it, redistribute
 * it, and/or modify it under the terms of the GNU Lesser General
 * Public License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 * The full text of the license can be found in the file LICENSE.md at
 * the top level directory of deal.II.
 *
 * ---------------------------------------------------------------------
 *
 * Authors: Bruno Blais, Toni El Geitani Nehme, Rene Gassmoeller, Peter Munch
 */

// @sect3{Include files}

#include <deal.II/base/bounding_box.h>
#include <deal.II/base/conditional_ostream.h>
#include <deal.II/base/discrete_time.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/parameter_acceptor.h>
#include <deal.II/base/timer.h>

#include <deal.II/distributed/cell_weights.h>
#include <deal.II/distributed/solution_transfer.h>
#include <deal.II/distributed/tria.h>

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>

#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/mapping_q.h>

#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/grid/tria_iterator.h>

#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/vector.h>

#include <deal.II/numerics/data_out.h>
#include <deal.II/numerics/vector_tools.h>

// From the following include file we import the ParticleHandler class
// that allows you to manage
// a collection of particles (objects of type Particles::Particle), representing
// a collection of points with some attached properties (e.g., an id) floating
// on a parallel::distributed::Triangulation. The methods and classes in the
// namespace Particles allow one to easily implement Particle-In-Cell methods
// and particle tracing on distributed triangulations:
#include <deal.II/particles/particle_handler.h>

// We import the particles generator
// which allows us to insert the particles. In the present step, the particles
// are globally inserted using a non-matching hyper-shell triangulation:
#include <deal.II/particles/generators.h>

// Since the particles do not form a triangulation, they have their
// own specific DataOut class which will enable us to write them
// to the commonly used parallel vtu format (or any number of other file formats):
#include <deal.II/particles/data_out.h>

#include <cmath>
#include <iostream>



namespace Step68
{
  using namespace dealii;

  // @sect3{Run-time parameter handling}

  // Similarly to what is done in step-60, we set up a class that holds
  // all the parameters of our problem and derive it from the ParameterAcceptor
  // class to simplify the management and creation of parameter files.
  //
  // The ParameterAcceptor paradigm requires all parameters to be writable by
  // the ParameterAcceptor methods. In order to avoid bugs that would be very
  // difficult to track down (such as writing things like `if (time = 0)`
  // instead of `if (time == 0)`), we declare all the parameters in an external
  // class, which is initialized before the actual `ParticleTracking` class, and
  // pass it to the main class as a `const` reference.
  //
  // The constructor of the class is responsible for the connection between the
  // members of this class and the corresponding entries in the
  // ParameterHandler. Thanks to the use of the
  // ParameterHandler::add_parameter() method, this connection is trivial, but
  // requires all members of this class to be writable.
  class ParticleTrackingParameters : public ParameterAcceptor
  {
  public:
    ParticleTrackingParameters();

    // This class consists largely of member variables that
    // describe the details of the particle tracking simulation and its
    // discretization. The following parameters are about where output should
    // be written to, the spatial discretization of the velocity (the default
    // is $Q_1$), the time step and the output frequency (how many time steps
    // should elapse before we generate graphical output again):
    std::string output_directory = "./";

    unsigned int velocity_degree       = 1;
    double       time_step             = 0.002;
    double       final_time            = 4.0;
    unsigned int output_frequency      = 10;
    unsigned int repartition_frequency = 5;

    // We allow every grid to be refined independently. In this tutorial, no
    // physics is resolved on the fluid grid, and its velocity is calculated
    // analytically.
    unsigned int fluid_refinement              = 4;
    unsigned int particle_insertion_refinement = 3;
  };



  // There remains the task of declaring what run-time parameters we can accept
  // in input files. Since we have a very limited number of parameters, all
  // parameters are declared in the same section.
  ParticleTrackingParameters::ParticleTrackingParameters()
    : ParameterAcceptor("Particle Tracking Problem/")
  {
    add_parameter(
      "Velocity degree", velocity_degree, "", prm, Patterns::Integer(1));

    add_parameter("Output frequency",
                  output_frequency,
                  "Iteration frequency at which output results are written",
                  prm,
                  Patterns::Integer(1));

    add_parameter("Repartition frequency",
                  repartition_frequency,
                  "Iteration frequency at which the mesh is load balanced",
                  prm,
                  Patterns::Integer(1));

    add_parameter("Output directory", output_directory);

    add_parameter("Time step", time_step, "", prm, Patterns::Double());

    add_parameter("Final time",
                  final_time,
                  "End time of the simulation",
                  prm,
                  Patterns::Double());

    add_parameter("Fluid refinement",
                  fluid_refinement,
                  "Refinement level of the fluid domain",
                  prm,
                  Patterns::Integer(0));

    add_parameter(
      "Particle insertion refinement",
      particle_insertion_refinement,
      "Refinement of the volumetric mesh used to insert the particles",
      prm,
      Patterns::Integer(0));
  }
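
  // For reference, a parameter file read through the ParameterAcceptor
  // machinery above could look as follows. This is only a sketch: the values
  // shown are the defaults declared above, and every entry may be omitted.
  //
  // @code
  // subsection Particle Tracking Problem
  //   set Velocity degree               = 1
  //   set Output frequency              = 10
  //   set Repartition frequency         = 5
  //   set Output directory              = ./
  //   set Time step                     = 0.002
  //   set Final time                    = 4.0
  //   set Fluid refinement              = 4
  //   set Particle insertion refinement = 3
  // end
  // @endcode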



  // @sect3{Velocity profile}

  // The velocity profile is provided as a Function object.
  // This function is hard-coded within
  // the example.
  template <int dim>
  class Vortex : public Function<dim>
  {
  public:
    Vortex()
      : Function<dim>(dim)
    {}


    virtual void vector_value(const Point<dim> &point,
                              Vector<double> &  values) const override;
  };


  // The velocity profile for the Rayleigh-Kothe vortex is time-dependent.
  // Consequently, the current time in the
  // simulation (t) must be gathered from the Function object.
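  //
  // In formulas, the velocity field implemented below reads (with $T$ the
  // period of the flow, hard-coded to 4 below):
  // @f{eqnarray*}{
  //   u_x &=& -2 \cos\left(\frac{\pi t}{T}\right) \sin^2(\pi x) \sin(\pi y) \cos(\pi y), \\
  //   u_y &=&  2 \cos\left(\frac{\pi t}{T}\right) \sin^2(\pi y) \sin(\pi x) \cos(\pi x).
  // @f}
  // Since the factor $\cos(\pi t/T)$ changes sign at $t = T/2$, the flow
  // reverses and the particles travel back toward their initial positions
  // during the second half of the period.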
  template <int dim>
  void Vortex<dim>::vector_value(const Point<dim> &point,
                                 Vector<double> &  values) const
  {
    const double T = 4;
    const double t = this->get_time();

    const double px = numbers::PI * point(0);
    const double py = numbers::PI * point(1);
    const double pt = numbers::PI / T * t;

    values[0] = -2 * cos(pt) * pow(sin(px), 2) * sin(py) * cos(py);
    values[1] = 2 * cos(pt) * pow(sin(py), 2) * sin(px) * cos(px);
    if (dim == 3)
      {
        values[2] = 0;
      }
  }



  // @sect3{The <code>ParticleTracking</code> class declaration}

  // We are now ready to introduce the main class of our tutorial program.
  template <int dim>
  class ParticleTracking
  {
  public:
    ParticleTracking(const ParticleTrackingParameters &par,
                     const bool                        interpolated_velocity);
    void run();

  private:
    // This function is responsible for the initial
    // generation of the particles on top of the background grid.
    void generate_particles();

    // When the velocity profile is interpolated to the position of the
    // particles, it must first be stored using degrees of freedom.
    // Consequently, as is the case for other parallel cases (e.g., step-40),
    // we initialize the degrees of freedom on the background grid.
    void setup_background_dofs();

    // In one of the test cases, the function is mapped to the background grid
    // and a finite element interpolation is used to calculate the velocity
    // at the particle location. This function calculates the value of the
    // function at the support points of the triangulation.
    void interpolate_function_to_field();

    // The next two functions are responsible for carrying out a step of the
    // explicit Euler time integration for the cases where the velocity field
    // is interpolated at the positions of the particles or calculated
    // analytically, respectively.
    void euler_step_interpolated(const double dt);
    void euler_step_analytical(const double dt);

    // The `cell_weight()` function indicates to the triangulation how much
    // computational work is expected to happen on this cell, and consequently
    // how the domain needs to be partitioned so that every MPI rank receives a
    // roughly equal amount of work (potentially not an equal number of cells).
    // While the function is called from the outside, it is connected to the
    // corresponding signal from inside this class, therefore it can be
    // `private`.
    unsigned int cell_weight(
      const typename parallel::distributed::Triangulation<dim>::cell_iterator
        &cell,
      const typename parallel::distributed::Triangulation<dim>::CellStatus
        status) const;

    // The following two functions are responsible for outputting the simulation
    // results for the particles and for the velocity profile on the background
    // mesh, respectively.
    void output_particles(const unsigned int it);
    void output_background(const unsigned int it);

    // The private members of this class are similar to other parallel deal.II
    // examples. The parameters are stored as a `const` member. It is important
    // to note that we keep the `Vortex` class as a member since its time
    // must be modified as the simulation proceeds.

    const ParticleTrackingParameters &par;

    MPI_Comm                                  mpi_communicator;
    parallel::distributed::Triangulation<dim> background_triangulation;
    Particles::ParticleHandler<dim>           particle_handler;

    DoFHandler<dim>                            fluid_dh;
    FESystem<dim>                              fluid_fe;
    MappingQ1<dim>                             mapping;
    LinearAlgebra::distributed::Vector<double> velocity_field;

    Vortex<dim> velocity;

    ConditionalOStream pcout;

    bool interpolated_velocity;
  };



  // @sect3{The <code>ParticleTracking</code> class implementation}

  // @sect4{Constructor}

  // The constructor is rather trivial. It is very similar
  // to what is done in step-40. We set the processors we want to work on
  // to all machines available (`MPI_COMM_WORLD`) and
  // initialize the <code>pcout</code> variable to only allow processor zero
  // to output anything to the standard output.

  template <int dim>
  ParticleTracking<dim>::ParticleTracking(const ParticleTrackingParameters &par,
                                          const bool interpolated_velocity)
    : par(par)
    , mpi_communicator(MPI_COMM_WORLD)
    , background_triangulation(mpi_communicator)
    , fluid_dh(background_triangulation)
    , fluid_fe(FE_Q<dim>(par.velocity_degree), dim)
    , pcout(std::cout, Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
    , interpolated_velocity(interpolated_velocity)
  {}



  // @sect4{Cell weight}

  // This function is the key component that allows us to dynamically balance
  // the computational load for this example. The function attributes a weight
  // to every cell that represents the computational work on this cell. Here
  // the majority of the work is expected to happen on the particles, therefore
  // the return value of this function (representing "work for this cell") is
  // calculated based on the number of particles in the current cell.
  // The function is
  // connected to the cell_weight() signal inside the triangulation, and will be
  // called once per cell, whenever the triangulation repartitions the domain
  // between ranks (the connection is created inside the
  // generate_particles() function of this class).
  template <int dim>
  unsigned int ParticleTracking<dim>::cell_weight(
    const typename parallel::distributed::Triangulation<dim>::cell_iterator
      &                                                                  cell,
    const typename parallel::distributed::Triangulation<dim>::CellStatus status)
    const
  {
    // We do not assign any weight to cells we do not own (i.e., artificial
    // or ghost cells).
    if (!cell->is_locally_owned())
      return 0;

    // This determines how important particle work is compared to cell
    // work (by default every cell has a weight of 1000).
    // We set the weight per particle much higher to indicate that
    // the particle load is the only one that is important to distribute the
    // cells in this example. The optimal value of this number depends on the
    // application and can range from 0 (cheap particle operations,
    // expensive cell operations) to much larger than 1000 (expensive
    // particle operations, cheap cell operations, as presumed in this
    // example). With the value chosen here, the work for a single particle
    // is weighted ten times higher than the base weight of the cell itself.
    const unsigned int particle_weight = 10000;

    // This example does not use adaptive refinement, therefore every cell
    // should have the status `CELL_PERSIST`. However this function can also
    // be used to distribute load during refinement, therefore we consider
    // refined or coarsened cells as well.
    if (status == parallel::distributed::Triangulation<dim>::CELL_PERSIST ||
        status == parallel::distributed::Triangulation<dim>::CELL_REFINE)
      {
        const unsigned int n_particles_in_cell =
          particle_handler.n_particles_in_cell(cell);
        return n_particles_in_cell * particle_weight;
      }
    else if (status == parallel::distributed::Triangulation<dim>::CELL_COARSEN)
      {
        unsigned int n_particles_in_cell = 0;

        for (unsigned int child_index = 0; child_index < cell->n_children();
             ++child_index)
          n_particles_in_cell +=
            particle_handler.n_particles_in_cell(cell->child(child_index));

        return n_particles_in_cell * particle_weight;
      }

    Assert(false, ExcInternalError());
    return 0;
  }



  // @sect4{Particles generation}

  // This function generates the tracer particles and the background
  // triangulation on which these particles evolve.
  template <int dim>
  void ParticleTracking<dim>::generate_particles()
  {
    // We create a hyper cube triangulation which we globally refine. This
    // triangulation covers the full trajectory of the particles.
    GridGenerator::hyper_cube(background_triangulation, 0, 1);
    background_triangulation.refine_global(par.fluid_refinement);

    // In order to consider the particles when repartitioning the
    // triangulation, the algorithm needs to know three things:
    //
    // 1. How much weight to assign to each cell (how many particles are in
    // there);
    // 2. How to pack the particles before shipping data around;
    // 3. How to unpack the particles after repartitioning.
    //
    // We attach the correct functions to the signals inside
    // parallel::distributed::Triangulation. These signals will be called every
    // time the repartition() function is called. These connections only need to
    // be created once, so we might as well have set them up in the constructor
    // of this class, but for the purpose of this example we want to group the
    // particle related instructions.
    background_triangulation.signals.cell_weight.connect(
      [&](
        const typename parallel::distributed::Triangulation<dim>::cell_iterator
          &cell,
        const typename parallel::distributed::Triangulation<dim>::CellStatus
          status) -> unsigned int { return this->cell_weight(cell, status); });

    background_triangulation.signals.pre_distributed_repartition.connect(
      [this]() { this->particle_handler.register_store_callback_function(); });

    background_triangulation.signals.post_distributed_repartition.connect(
      [&]() { this->particle_handler.register_load_callback_function(false); });

    // This initializes the background triangulation where the particles are
    // living and the number of properties of the particles.
    particle_handler.initialize(background_triangulation, mapping, 1 + dim);

    // We create a particle triangulation which is solely used to generate
    // the points which will be used to insert the particles. This
    // triangulation is a hyper shell which is offset from the
    // center of the simulation domain. This will be used to generate a
    // disk filled with particles which will allow an easy monitoring
    // of the motion due to the vortex.
    Point<dim> center;
    center[0] = 0.5;
    center[1] = 0.75;
    if (dim == 3)
      center[2] = 0.5;

    const double outer_radius = 0.15;
    const double inner_radius = 0.01;

    parallel::distributed::Triangulation<dim> particle_triangulation(
      MPI_COMM_WORLD);

    GridGenerator::hyper_shell(
      particle_triangulation, center, inner_radius, outer_radius, 6);
    particle_triangulation.refine_global(par.particle_insertion_refinement);

    // We generate the necessary bounding boxes for the particles generator.
    // These bounding boxes are required to quickly identify in which
    // process's subdomain the inserted particle lies, and which cell owns it.
    const auto my_bounding_box = GridTools::compute_mesh_predicate_bounding_box(
      background_triangulation, IteratorFilters::LocallyOwnedCell());
    const auto global_bounding_boxes =
      Utilities::MPI::all_gather(MPI_COMM_WORLD, my_bounding_box);

    // We generate an empty vector of properties. We will attribute the
    // properties to the particles once they are generated.
    std::vector<std::vector<double>> properties(
      particle_triangulation.n_locally_owned_active_cells(),
      std::vector<double>(dim + 1, 0.));

    // We generate the particles at the position of a single
    // point quadrature. Consequently, one particle will be generated
    // at the centroid of each cell.
    Particles::Generators::quadrature_points(particle_triangulation,
                                             QMidpoint<dim>(),
                                             global_bounding_boxes,
                                             particle_handler,
                                             mapping,
                                             properties);

    pcout << "Number of particles inserted: "
          << particle_handler.n_global_particles() << std::endl;
  }



  // @sect4{Background DOFs and interpolation}

  // This function sets up the background degrees of freedom used for the
  // velocity interpolation and allocates the field vector where the entire
  // solution of the velocity field is stored.
  template <int dim>
  void ParticleTracking<dim>::setup_background_dofs()
  {
    fluid_dh.distribute_dofs(fluid_fe);
    const IndexSet locally_owned_dofs = fluid_dh.locally_owned_dofs();
    IndexSet       locally_relevant_dofs;
    DoFTools::extract_locally_relevant_dofs(fluid_dh, locally_relevant_dofs);

    velocity_field.reinit(locally_owned_dofs,
                          locally_relevant_dofs,
                          mpi_communicator);
  }



  // This function takes care of interpolating the
  // vortex velocity field to the field vector. This is achieved rather easily
  // by using the VectorTools::interpolate() function.
  template <int dim>
  void ParticleTracking<dim>::interpolate_function_to_field()
  {
    velocity_field.zero_out_ghosts();
    VectorTools::interpolate(mapping, fluid_dh, velocity, velocity_field);
    velocity_field.update_ghost_values();
  }



  // @sect4{Time integration of the trajectories}

  // We integrate the particle trajectories
  // using an analytically defined velocity field. This demonstrates a
  // relatively trivial usage of the particles.
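  //
  // A single step of the explicit Euler method updates each particle
  // position $\mathbf{x}$ according to
  // @f[
  //   \mathbf{x}^{n+1} = \mathbf{x}^{n}
  //                      + \Delta t \, \mathbf{u}\left(\mathbf{x}^{n}, t^{n}\right),
  // @f]
  // where the velocity $\mathbf{u}$ is evaluated at the old particle
  // position (in this first variant, directly from the analytical
  // expression above).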
  template <int dim>
  void ParticleTracking<dim>::euler_step_analytical(const double dt)
  {
    const unsigned int this_mpi_rank =
      Utilities::MPI::this_mpi_process(mpi_communicator);
    Vector<double> particle_velocity(dim);

    // Looping over all particles in the domain using a particle iterator
    for (auto &particle : particle_handler)
      {
        // We calculate the velocity of the particles using their current
        // location.
        Point<dim> particle_location = particle.get_location();
        velocity.vector_value(particle_location, particle_velocity);

        // This updates the position of the particles and sets the old position
        // equal to the new position of the particle.
        for (int d = 0; d < dim; ++d)
          particle_location[d] += particle_velocity[d] * dt;

        particle.set_location(particle_location);

        // We store the processor id (a scalar) and the particle velocity (a
        // vector) in the particle properties. In this example, this is done
        // purely for visualization purposes.
        ArrayView<double> properties = particle.get_properties();
        for (int d = 0; d < dim; ++d)
          properties[d] = particle_velocity[d];
        properties[dim] = this_mpi_rank;
      }
  }



  // In contrast to the previous function, in this function we
  // integrate the particle trajectories by interpolating the value of
  // the velocity field at the degrees of freedom to the position of
  // the particles.
  template <int dim>
  void ParticleTracking<dim>::euler_step_interpolated(const double dt)
  {
    Vector<double> local_dof_values(fluid_fe.dofs_per_cell);

    // We loop over all the local particles. Although this could be achieved
    // directly by looping over all the cells, this would force us
    // to loop over numerous cells which do not contain particles.
    // Rather, we loop over all the particles, but we get the reference
    // of the cell in which the particle lies and then loop over all particles
    // within that cell. This enables us to gather the values of the velocity
    // out of the `velocity_field` vector once and use them for all particles
    // that lie within the cell.
    auto particle = particle_handler.begin();
    while (particle != particle_handler.end())
      {
        const auto cell =
          particle->get_surrounding_cell(background_triangulation);
        const auto dh_cell =
          typename DoFHandler<dim>::cell_iterator(*cell, &fluid_dh);

        dh_cell->get_dof_values(velocity_field, local_dof_values);

        // Next, compute the velocity at the particle locations by evaluating
        // the finite element solution at the position of the particles.
        // This is essentially an optimized version of the particle advection
        // functionality in step-19, but instead of creating quadrature
        // objects and FEValues objects for each cell, we do the
        // evaluation by hand, which is somewhat more efficient and only
        // matters for this tutorial, because the particle work is the
        // dominant cost of the whole program.
        const auto pic = particle_handler.particles_in_cell(cell);
        Assert(pic.begin() == particle, ExcInternalError());
        for (auto &p : pic)
          {
            const Point<dim> reference_location = p.get_reference_location();
            Tensor<1, dim>   particle_velocity;
            for (unsigned int j = 0; j < fluid_fe.dofs_per_cell; ++j)
              {
                const auto comp_j = fluid_fe.system_to_component_index(j);

                particle_velocity[comp_j.first] +=
                  fluid_fe.shape_value(j, reference_location) *
                  local_dof_values[j];
              }

            Point<dim> particle_location = particle->get_location();
            for (int d = 0; d < dim; ++d)
              particle_location[d] += particle_velocity[d] * dt;
            p.set_location(particle_location);

            // Again, we store the particle velocity and the processor id in the
            // particle properties for visualization purposes.
            ArrayView<double> properties = p.get_properties();
            for (int d = 0; d < dim; ++d)
              properties[d] = particle_velocity[d];

            properties[dim] =
              Utilities::MPI::this_mpi_process(mpi_communicator);

            ++particle;
          }
      }
  }



  // @sect4{Data output}

  // The next two functions take care of writing both the particles
  // and the background mesh to vtu with a pvtu record. This ensures
  // that the simulation results can be visualized when the simulation is
  // launched in parallel.
  template <int dim>
  void ParticleTracking<dim>::output_particles(const unsigned int it)
  {
    Particles::DataOut<dim, dim> particle_output;

    std::vector<std::string> solution_names(dim, "velocity");
    solution_names.push_back("process_id");

    std::vector<DataComponentInterpretation::DataComponentInterpretation>
      data_component_interpretation(
        dim, DataComponentInterpretation::component_is_part_of_vector);
    data_component_interpretation.push_back(
      DataComponentInterpretation::component_is_scalar);

    particle_output.build_patches(particle_handler,
                                  solution_names,
                                  data_component_interpretation);
    const std::string output_folder(par.output_directory);
    const std::string file_name(interpolated_velocity ?
                                  "interpolated-particles" :
                                  "analytical-particles");

    pcout << "Writing particle output file: " << file_name << "-" << it
          << std::endl;

    particle_output.write_vtu_with_pvtu_record(
      output_folder, file_name, it, mpi_communicator, 6);
  }
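
  // A remark on the last argument of write_vtu_with_pvtu_record(): it sets
  // the number of digits used for the iteration counter in the generated
  // file names, so that with the value 6 the output of, say, iteration 10
  // ends up in files named along the lines of
  // `interpolated-particles_000010.pvtu` (plus the per-process .vtu files
  // the record references). The exact naming scheme is documented in
  // DataOutInterface::write_vtu_with_pvtu_record().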



  template <int dim>
  void ParticleTracking<dim>::output_background(const unsigned int it)
  {
    std::vector<std::string> solution_names(dim, "velocity");
    std::vector<DataComponentInterpretation::DataComponentInterpretation>
      data_component_interpretation(
        dim, DataComponentInterpretation::component_is_part_of_vector);

    DataOut<dim> data_out;

    // Attach the solution data to the data_out object
    data_out.attach_dof_handler(fluid_dh);
    data_out.add_data_vector(velocity_field,
                             solution_names,
                             DataOut<dim>::type_dof_data,
                             data_component_interpretation);
    Vector<float> subdomain(background_triangulation.n_active_cells());
    for (unsigned int i = 0; i < subdomain.size(); ++i)
      subdomain(i) = background_triangulation.locally_owned_subdomain();
    data_out.add_data_vector(subdomain, "subdomain");

    data_out.build_patches(mapping);

    const std::string output_folder(par.output_directory);
    const std::string file_name("background");

    pcout << "Writing background field file: " << file_name << "-" << it
          << std::endl;

    data_out.write_vtu_with_pvtu_record(
      output_folder, file_name, it, mpi_communicator, 6);
  }



  // @sect4{Running the simulation}
  // This function orchestrates the entire simulation. It is very similar
  // to the other time dependent tutorial programs -- take step-21 or step-26 as
  // an example. Note that we use the DiscreteTime class to monitor the time,
  // the time step and the step number. This function is relatively
  // straightforward.

  template <int dim>
  void ParticleTracking<dim>::run()
  {
    DiscreteTime discrete_time(0, par.final_time, par.time_step);

    generate_particles();

    pcout << "Repartitioning triangulation after particle generation"
          << std::endl;
    background_triangulation.repartition();

    // We set the initial properties of the particles by doing an
    // explicit Euler iteration with a time step of 0 both in the case
    // of the analytical and the interpolated approach.
    if (interpolated_velocity)
      {
        setup_background_dofs();
        interpolate_function_to_field();
        euler_step_interpolated(0.);
      }
    else
      euler_step_analytical(0.);

    output_particles(discrete_time.get_step_number());
    if (interpolated_velocity)
      output_background(discrete_time.get_step_number());

    // The particles are advected by looping over time.
    while (!discrete_time.is_at_end())
      {
        discrete_time.advance_time();
        velocity.set_time(discrete_time.get_previous_time());

        if ((discrete_time.get_step_number() % par.repartition_frequency) == 0)
          {
            background_triangulation.repartition();
            if (interpolated_velocity)
              setup_background_dofs();
          }

        if (interpolated_velocity)
          {
            interpolate_function_to_field();
            euler_step_interpolated(discrete_time.get_previous_step_size());
          }
        else
          euler_step_analytical(discrete_time.get_previous_step_size());

        // After the particles have been moved, it is necessary to identify
        // in which cell they now reside. This is achieved by calling
        // <code>sort_particles_into_subdomains_and_cells()</code>.
        particle_handler.sort_particles_into_subdomains_and_cells();

        if ((discrete_time.get_step_number() % par.output_frequency) == 0)
          {
            output_particles(discrete_time.get_step_number());
            if (interpolated_velocity)
              output_background(discrete_time.get_step_number());
          }
      }
  }

} // namespace Step68




// @sect3{The main() function}

// The remainder of the code, the `main()` function, is standard.
// We note that we run the particle tracking with both the analytical velocity
// and the interpolated velocity, and produce both sets of results.
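//
// A typical invocation (assuming the executable built from this program is
// called `step-68` and that a parameter file is available in the working
// directory) could look like:
// @code
//   mpirun -np 4 ./step-68 parameters.prm
// @endcode
// As the code below shows, if no command line argument is given, the program
// falls back to reading a file called `parameters.prm`.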
int main(int argc, char *argv[])
{
  using namespace Step68;
  using namespace dealii;
  deallog.depth_console(1);

  try
    {
      Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);

      std::string prm_file;
      if (argc > 1)
        prm_file = argv[1];
      else
        prm_file = "parameters.prm";

      ParticleTrackingParameters par;
      ParameterAcceptor::initialize(prm_file);
      {
        Step68::ParticleTracking<2> particle_tracking(par, false);
        particle_tracking.run();
      }
      {
        Step68::ParticleTracking<2> particle_tracking(par, true);
        particle_tracking.run();
      }
    }
  catch (std::exception &exc)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Exception on processing: " << std::endl
                << exc.what() << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;

      return 1;
    }
  catch (...)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Unknown exception!" << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;
      return 1;
    }

  return 0;
}