// ---------------------------------------------------------------------
//
// Copyright (C) 2003 - 2020 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------


#include <deal.II/base/function.h>
#include <deal.II/base/logstream.h>

#include <deal.II/dofs/dof_accessor.h>

#include <deal.II/fe/fe.h>

#include <deal.II/grid/tria.h>
#include <deal.II/grid/tria_iterator.h>

#include <deal.II/lac/block_vector.h>
#include <deal.II/lac/la_parallel_block_vector.h>
#include <deal.II/lac/la_parallel_vector.h>
#include <deal.II/lac/trilinos_epetra_vector.h>
#include <deal.II/lac/trilinos_parallel_block_vector.h>
#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/vector.h>

#include <deal.II/multigrid/mg_tools.h>
#include <deal.II/multigrid/mg_transfer.h>
#include <deal.II/multigrid/mg_transfer.templates.h>
#include <deal.II/multigrid/mg_transfer_internal.h>

#include <algorithm>

DEAL_II_NAMESPACE_OPEN


/* ------------------ MGLevelGlobalTransfer<VectorType> ----------------- */
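//
// This base class implements the copy operations between a global vector
// and the level vectors of a multigrid hierarchy; the transfer classes
// built on top of it call into it. A rough usage sketch (`dof_handler` is
// illustrative, not part of this file):
//
//   MGTransferPrebuilt<LinearAlgebra::distributed::Vector<double>> transfer;
//   transfer.build(dof_handler);
//   // copy_to_mg()/copy_from_mg() then use the copy indices set up below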


template <typename VectorType>
template <int dim, int spacedim>
void
MGLevelGlobalTransfer<VectorType>::fill_and_communicate_copy_indices(
  const DoFHandler<dim, spacedim> &mg_dof)
{
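  // ask the internal routine for the (global index, level index) pairs to
  // be copied: 'copy_indices' holds pairs where both dofs are locally
  // owned, 'copy_indices_global_mine' pairs where only the global dof is
  // owned here, and 'copy_indices_level_mine' pairs where only the level
  // dof is owned here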
  internal::MGTransfer::fill_copy_indices(mg_dof,
                                          mg_constrained_dofs,
                                          copy_indices,
                                          copy_indices_global_mine,
                                          copy_indices_level_mine);

  // check if we can run a plain copy operation between the global DoFs and
  // the finest level.
  bool my_perform_plain_copy =
    (copy_indices.back().size() == mg_dof.locally_owned_dofs().n_elements()) &&
    (mg_dof.locally_owned_dofs().n_elements() ==
     mg_dof
       .locally_owned_mg_dofs(mg_dof.get_triangulation().n_global_levels() - 1)
       .n_elements());
  if (my_perform_plain_copy)
    {
      AssertDimension(copy_indices_global_mine.back().size(), 0);
      AssertDimension(copy_indices_level_mine.back().size(), 0);

      // check whether there is a renumbering of degrees of freedom on
      // either the finest level or the global dofs, which means that we
      // cannot apply a plain copy
      for (const auto &copy_index : copy_indices.back())
        if (copy_index.first != copy_index.second)
          {
            my_perform_plain_copy = false;
            break;
          }
    }

  // now do a global reduction over all processors to see what operation
  // they can agree upon
  if (const parallel::TriangulationBase<dim, spacedim> *ptria =
        dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
          &mg_dof.get_triangulation()))
    perform_plain_copy = (Utilities::MPI::min(my_perform_plain_copy ? 1 : 0,
                                              ptria->get_communicator()) == 1);
  else
    perform_plain_copy = my_perform_plain_copy;
}



template <typename VectorType>
void
MGLevelGlobalTransfer<VectorType>::clear()
{
  sizes.resize(0);
  copy_indices.clear();
  copy_indices_global_mine.clear();
  copy_indices_level_mine.clear();
  component_to_block_map.resize(0);
  mg_constrained_dofs = nullptr;
  perform_plain_copy  = false;
}



template <typename VectorType>
void
MGLevelGlobalTransfer<VectorType>::print_indices(std::ostream &os) const
{
  for (unsigned int level = 0; level < copy_indices.size(); ++level)
    {
      for (unsigned int i = 0; i < copy_indices[level].size(); ++i)
        os << "copy_indices[" << level << "]\t" << copy_indices[level][i].first
           << '\t' << copy_indices[level][i].second << std::endl;
    }

  for (unsigned int level = 0; level < copy_indices_level_mine.size(); ++level)
    {
      for (unsigned int i = 0; i < copy_indices_level_mine[level].size(); ++i)
        os << "copy_ifrom  [" << level << "]\t"
           << copy_indices_level_mine[level][i].first << '\t'
           << copy_indices_level_mine[level][i].second << std::endl;
    }
  for (unsigned int level = 0; level < copy_indices_global_mine.size(); ++level)
    {
      for (unsigned int i = 0; i < copy_indices_global_mine[level].size(); ++i)
        os << "copy_ito    [" << level << "]\t"
           << copy_indices_global_mine[level][i].first << '\t'
           << copy_indices_global_mine[level][i].second << std::endl;
    }
}



template <typename VectorType>
std::size_t
MGLevelGlobalTransfer<VectorType>::memory_consumption() const
{
  std::size_t result = sizeof(*this);
  result += MemoryConsumption::memory_consumption(sizes);
  result += MemoryConsumption::memory_consumption(copy_indices);
  result += MemoryConsumption::memory_consumption(copy_indices_global_mine);
  result += MemoryConsumption::memory_consumption(copy_indices_level_mine);

  return result;
}



/* ---- MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector> ----- */

namespace
{
  template <int dim, int spacedim, typename Number>
  void
  fill_internal(
    const DoFHandler<dim, spacedim> &mg_dof,
    SmartPointer<
      const MGConstrainedDoFs,
      MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number>>>
                                                mg_constrained_dofs,
    const MPI_Comm                              mpi_communicator,
    const bool                                  transfer_solution_vectors,
    std::vector<Table<2, unsigned int>> &       copy_indices,
    std::vector<Table<2, unsigned int>> &       copy_indices_global_mine,
    std::vector<Table<2, unsigned int>> &       copy_indices_level_mine,
    LinearAlgebra::distributed::Vector<Number> &ghosted_global_vector,
    MGLevelObject<LinearAlgebra::distributed::Vector<Number>>
      &ghosted_level_vector)
  {
    // first compute the copy indices in the global dof numbering through
    // the generic routine; they are localized to the partitioners below
    std::vector<
      std::vector<std::pair<types::global_dof_index, types::global_dof_index>>>
      my_copy_indices;
    std::vector<
      std::vector<std::pair<types::global_dof_index, types::global_dof_index>>>
      my_copy_indices_global_mine;
    std::vector<
      std::vector<std::pair<types::global_dof_index, types::global_dof_index>>>
      my_copy_indices_level_mine;

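    // the last argument controls whether dofs on refinement edges are
    // skipped: for the solution vectors these interface dofs must be
    // included, for the standard transfer they are left out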
    internal::MGTransfer::fill_copy_indices(mg_dof,
                                            mg_constrained_dofs,
                                            my_copy_indices,
                                            my_copy_indices_global_mine,
                                            my_copy_indices_level_mine,
                                            !transfer_solution_vectors);

    // get all degrees of freedom that we need read access to in copy_to_mg
    // and copy_from_mg, respectively. We fill an IndexSet once on each level
    // (for the global_mine indices accessing remote level indices) and once
    // globally (for the level_mine indices accessing remote global indices).

    // the variables index_set and level_index_set are going to define the
    // ghost indices of the respective vectors (due to construction, these are
    // precisely the indices that we need)

    IndexSet index_set(mg_dof.locally_owned_dofs().size());
    std::vector<types::global_dof_index> accessed_indices;
    ghosted_level_vector.resize(0,
                                mg_dof.get_triangulation().n_global_levels() -
                                  1);
    std::vector<IndexSet> level_index_set(
      mg_dof.get_triangulation().n_global_levels());
    for (unsigned int l = 0; l < mg_dof.get_triangulation().n_global_levels();
         ++l)
      {
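        // pairs in level_mine reference remote global dofs, which become
        // ghost entries of the global vector; pairs in global_mine reference
        // remote level dofs, which become ghost entries of this level's
        // vector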
        for (const auto &indices : my_copy_indices_level_mine[l])
          accessed_indices.push_back(indices.first);
        std::vector<types::global_dof_index> accessed_level_indices;
        for (const auto &indices : my_copy_indices_global_mine[l])
          accessed_level_indices.push_back(indices.second);
        std::sort(accessed_level_indices.begin(), accessed_level_indices.end());
        level_index_set[l].set_size(mg_dof.locally_owned_mg_dofs(l).size());
        level_index_set[l].add_indices(accessed_level_indices.begin(),
                                       accessed_level_indices.end());
        level_index_set[l].compress();
        ghosted_level_vector[l].reinit(mg_dof.locally_owned_mg_dofs(l),
                                       level_index_set[l],
                                       mpi_communicator);
      }
    std::sort(accessed_indices.begin(), accessed_indices.end());
    index_set.add_indices(accessed_indices.begin(), accessed_indices.end());
    index_set.compress();
    ghosted_global_vector.reinit(mg_dof.locally_owned_dofs(),
                                 index_set,
                                 mpi_communicator);

    // localize the copy indices for faster access. Since all access will be
    // through the ghosted vectors just set up, we can use this (much faster)
    // option
    copy_indices.resize(mg_dof.get_triangulation().n_global_levels());
    copy_indices_level_mine.resize(
      mg_dof.get_triangulation().n_global_levels());
    copy_indices_global_mine.resize(
      mg_dof.get_triangulation().n_global_levels());
    for (unsigned int level = 0;
         level < mg_dof.get_triangulation().n_global_levels();
         ++level)
      {
        const Utilities::MPI::Partitioner &global_partitioner =
          *ghosted_global_vector.get_partitioner();
        const Utilities::MPI::Partitioner &level_partitioner =
          *ghosted_level_vector[level].get_partitioner();

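        // translate a list of (global dof, level dof) pairs in global
        // numbering into a 2 x n table of local indices within the ghosted
        // global and level vectors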
        auto translate_indices =
          [&](const std::vector<
                std::pair<types::global_dof_index, types::global_dof_index>>
                &                     global_copy_indices,
              Table<2, unsigned int> &local_copy_indices) {
            local_copy_indices.reinit(2, global_copy_indices.size());
            for (unsigned int i = 0; i < global_copy_indices.size(); ++i)
              {
                local_copy_indices(0, i) = global_partitioner.global_to_local(
                  global_copy_indices[i].first);
                local_copy_indices(1, i) = level_partitioner.global_to_local(
                  global_copy_indices[i].second);
              }
          };

        // owned-owned case
        translate_indices(my_copy_indices[level], copy_indices[level]);

        // remote-owned case
        translate_indices(my_copy_indices_level_mine[level],
                          copy_indices_level_mine[level]);

        // owned-remote case
        translate_indices(my_copy_indices_global_mine[level],
                          copy_indices_global_mine[level]);
      }
  }
} // namespace

template <typename Number>
template <int dim, int spacedim>
void
MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number>>::
  fill_and_communicate_copy_indices(const DoFHandler<dim, spacedim> &mg_dof)
{
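  // find the communicator over which the transfer operates: a serial
  // Triangulation runs through the same code with a communicator containing
  // only the current process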
  const parallel::TriangulationBase<dim, spacedim> *ptria =
    dynamic_cast<const parallel::TriangulationBase<dim, spacedim> *>(
      &mg_dof.get_triangulation());
  const MPI_Comm mpi_communicator =
    ptria != nullptr ? ptria->get_communicator() : MPI_COMM_SELF;

  fill_internal(mg_dof,
                mg_constrained_dofs,
                mpi_communicator,
                false,
                this->copy_indices,
                this->copy_indices_global_mine,
                this->copy_indices_level_mine,
                ghosted_global_vector,
                ghosted_level_vector);

  // if hanging nodes lead to dofs on refinement edges, the solution vectors
  // need their own set of copy indices that include these additional dofs;
  // otherwise they can re-use the indices of the standard transfer
  int have_refinement_edge_dofs = 0;
  if (mg_constrained_dofs != nullptr)
    for (unsigned int level = 0;
         level < mg_dof.get_triangulation().n_global_levels();
         ++level)
      if (mg_constrained_dofs->get_refinement_edge_indices(level).n_elements() >
          0)
        {
          have_refinement_edge_dofs = 1;
          break;
        }
  if (Utilities::MPI::max(have_refinement_edge_dofs, mpi_communicator) == 1)
    fill_internal(mg_dof,
                  mg_constrained_dofs,
                  mpi_communicator,
                  true,
                  this->solution_copy_indices,
                  this->solution_copy_indices_global_mine,
                  this->solution_copy_indices_level_mine,
                  solution_ghosted_global_vector,
                  solution_ghosted_level_vector);
  else
    {
      this->solution_copy_indices             = this->copy_indices;
      this->solution_copy_indices_global_mine = this->copy_indices_global_mine;
      this->solution_copy_indices_level_mine  = this->copy_indices_level_mine;
      solution_ghosted_global_vector          = ghosted_global_vector;
      solution_ghosted_level_vector           = ghosted_level_vector;
    }

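  // a 'renumbered plain copy' is possible if every locally owned global dof
  // appears in the owned-owned list, i.e., no communication is needed but
  // the local numbering may be permuted; a 'plain copy' additionally
  // requires the numbering on the global and the finest level to coincide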
  bool my_perform_renumbered_plain_copy =
    (this->copy_indices.back().n_cols() ==
     mg_dof.locally_owned_dofs().n_elements());
  bool my_perform_plain_copy = false;
  if (my_perform_renumbered_plain_copy)
    {
      my_perform_plain_copy = true;
      AssertDimension(this->copy_indices_global_mine.back().n_rows(), 0);
      AssertDimension(this->copy_indices_level_mine.back().n_rows(), 0);

      // check whether there is a renumbering of degrees of freedom on
      // either the finest level or the global dofs, which means that we
      // cannot apply a plain copy
      for (unsigned int i = 0; i < this->copy_indices.back().n_cols(); ++i)
        if (this->copy_indices.back()(0, i) != this->copy_indices.back()(1, i))
          {
            my_perform_plain_copy = false;
            break;
          }
    }

  // now do a global reduction over all processors to see what operation
  // they can agree upon
  perform_plain_copy =
    Utilities::MPI::min(static_cast<int>(my_perform_plain_copy),
                        mpi_communicator);
  perform_renumbered_plain_copy =
    Utilities::MPI::min(static_cast<int>(my_perform_renumbered_plain_copy),
                        mpi_communicator);

  // if we do a plain copy, no need to hold additional ghosted vectors
  if (perform_renumbered_plain_copy)
    {
      for (unsigned int i = 0; i < this->copy_indices.back().n_cols(); ++i)
        AssertDimension(this->copy_indices.back()(0, i), i);

      ghosted_global_vector.reinit(0);
      ghosted_level_vector.resize(0, 0);
      solution_ghosted_global_vector.reinit(0);
      solution_ghosted_level_vector.resize(0, 0);
    }
}



template <typename Number>
void
MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number>>::clear()
{
  sizes.resize(0);
  copy_indices.clear();
  copy_indices_global_mine.clear();
  copy_indices_level_mine.clear();
  component_to_block_map.resize(0);
  mg_constrained_dofs = nullptr;
  ghosted_global_vector.reinit(0);
  ghosted_level_vector.resize(0, 0);
  perform_plain_copy            = false;
  perform_renumbered_plain_copy = false;
}



template <typename Number>
void
MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<Number>>::
  print_indices(std::ostream &os) const
{
  for (unsigned int level = 0; level < copy_indices.size(); ++level)
    {
      for (unsigned int i = 0; i < copy_indices[level].n_cols(); ++i)
        os << "copy_indices[" << level << "]\t" << copy_indices[level](0, i)
           << '\t' << copy_indices[level](1, i) << std::endl;
    }

  for (unsigned int level = 0; level < copy_indices_level_mine.size(); ++level)
    {
      for (unsigned int i = 0; i < copy_indices_level_mine[level].n_cols(); ++i)
        os << "copy_ifrom  [" << level << "]\t"
           << copy_indices_level_mine[level](0, i) << '\t'
           << copy_indices_level_mine[level](1, i) << std::endl;
    }
  for (unsigned int level = 0; level < copy_indices_global_mine.size(); ++level)
    {
      for (unsigned int i = 0; i < copy_indices_global_mine[level].n_cols();
           ++i)
        os << "copy_ito    [" << level << "]\t"
           << copy_indices_global_mine[level](0, i) << '\t'
           << copy_indices_global_mine[level](1, i) << std::endl;
    }
}



template <typename Number>
std::size_t
MGLevelGlobalTransfer<
  LinearAlgebra::distributed::Vector<Number>>::memory_consumption() const
{
  std::size_t result = sizeof(*this);
  result += MemoryConsumption::memory_consumption(sizes);
  result += MemoryConsumption::memory_consumption(copy_indices);
  result += MemoryConsumption::memory_consumption(copy_indices_global_mine);
  result += MemoryConsumption::memory_consumption(copy_indices_level_mine);
  result += ghosted_global_vector.memory_consumption();
  for (unsigned int i = ghosted_level_vector.min_level();
       i <= ghosted_level_vector.max_level();
       ++i)
    result += ghosted_level_vector[i].memory_consumption();

  return result;
}



// explicit instantiation
#include "mg_level_global_transfer.inst"

// create an additional instantiation currently not supported by the automatic
// template instantiation scheme
template class MGLevelGlobalTransfer<LinearAlgebra::distributed::Vector<float>>;


DEAL_II_NAMESPACE_CLOSE