// ---------------------------------------------------------------------
//
// Copyright (C) 2004 - 2018 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#ifndef dealii_petsc_block_vector_h
#define dealii_petsc_block_vector_h


#include <deal.II/base/config.h>

#ifdef DEAL_II_WITH_PETSC

#  include <deal.II/lac/block_indices.h>
#  include <deal.II/lac/block_vector_base.h>
#  include <deal.II/lac/exceptions.h>
#  include <deal.II/lac/petsc_vector.h>
#  include <deal.II/lac/vector_type_traits.h>

DEAL_II_NAMESPACE_OPEN


namespace PETScWrappers
{
  // forward declaration
  class BlockVector;

  namespace MPI
  {
    /*! @addtogroup PETScWrappers
     * @{
     */

    /**
     * An implementation of block vectors based on the parallel vector class
     * implemented in PETScWrappers. While the base class provides for most of
     * the interface, this class handles the actual allocation of vectors and
     * provides functions that are specific to the underlying vector type.
     *
     * The model of distribution of data is such that each of the blocks is
     * distributed across all MPI processes named in the MPI communicator.
     * That is, we do not just distribute the whole vector, but each component
     * individually. In the constructors and reinit() functions, one therefore
     * not only has to specify the sizes of the individual blocks, but also
     * the number of elements of each of these blocks to be stored on the
     * local process.
     *
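     * As a minimal sketch (the number of blocks, the block size, and the
     * per-process local size below are made up for illustration, and the
     * block size is assumed to be evenly divisible among the participating
     * processes), such a vector could be set up and filled like this:
     * @code
     * const unsigned int n_blocks   = 2;
     * const unsigned int block_size = 100;
     * // assumes block_size is divisible by the number of processes:
     * const unsigned int local_size =
     *   block_size / Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
     *
     * PETScWrappers::MPI::BlockVector v(n_blocks,
     *                                   MPI_COMM_WORLD,
     *                                   block_size,
     *                                   local_size);
     * v = 1.; // set every locally stored element to one
     * @endcode
     *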
     * @ingroup Vectors @see
     * @ref GlossBlockLA "Block (linear algebra)"
     */
    class BlockVector : public BlockVectorBase<Vector>
    {
    public:
      /**
       * Typedef the base class for simpler access to its own alias types.
       */
      using BaseClass = BlockVectorBase<Vector>;

      /**
       * Typedef the type of the underlying vector.
       */
      using BlockType = BaseClass::BlockType;

      /**
       * Import the aliases from the base class.
       */
      using value_type      = BaseClass::value_type;
      using pointer         = BaseClass::pointer;
      using const_pointer   = BaseClass::const_pointer;
      using reference       = BaseClass::reference;
      using const_reference = BaseClass::const_reference;
      using size_type       = BaseClass::size_type;
      using iterator        = BaseClass::iterator;
      using const_iterator  = BaseClass::const_iterator;

      /**
       * Default constructor. Generate an empty vector without any blocks.
       */
      BlockVector() = default;

      /**
       * Constructor. Generate a block vector with @p n_blocks blocks, each of
       * which is a parallel vector across @p communicator with @p block_size
       * elements, of which @p local_size elements are stored on the present
       * process.
       */
      explicit BlockVector(const unsigned int n_blocks,
                           const MPI_Comm &   communicator,
                           const size_type    block_size,
                           const size_type    local_size);

      /**
       * Copy constructor. Set all the properties of the parallel vector to
       * those of the given argument and copy the elements.
       */
      BlockVector(const BlockVector &V);

      /**
       * Constructor. Set the number of blocks to <tt>block_sizes.size()</tt>
       * and initialize each block with <tt>block_sizes[i]</tt> zero elements.
       * The individual blocks are distributed across the given communicator,
       * and each stores <tt>local_elements[i]</tt> elements on the present
       * process.
       */
      BlockVector(const std::vector<size_type> &block_sizes,
                  const MPI_Comm &              communicator,
                  const std::vector<size_type> &local_elements);

      /**
       * Create a BlockVector with parallel_partitioning.size() blocks, each
       * initialized with the given IndexSet.
       */
      explicit BlockVector(const std::vector<IndexSet> &parallel_partitioning,
                           const MPI_Comm &communicator = MPI_COMM_WORLD);

      /**
       * Same as above, but include ghost elements.
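       *
       * A possible use, sketched here under the assumption that the per-block
       * IndexSet objects have been built elsewhere (for example from a
       * DoFHandler), would be:
       * @code
       * std::vector<IndexSet> locally_owned;  // one owned index set per block
       * std::vector<IndexSet> ghost_indices;  // one ghost index set per block
       * // ... fill both vectors of index sets ...
       *
       * PETScWrappers::MPI::BlockVector ghosted(locally_owned,
       *                                         ghost_indices,
       *                                         MPI_COMM_WORLD);
       * // the resulting vector is ghosted and therefore read-only:
       * Assert(ghosted.has_ghost_elements(), ExcInternalError());
       * @endcode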
       */
      BlockVector(const std::vector<IndexSet> &parallel_partitioning,
                  const std::vector<IndexSet> &ghost_indices,
                  const MPI_Comm &             communicator);



      /**
       * Destructor. Clears memory.
       */
      ~BlockVector() override = default;

      /**
       * Copy operator: fill all locally stored elements of the vector with
       * the given scalar value.
       */
      BlockVector &
      operator=(const value_type s);

      /**
       * Copy operator for arguments of the same type.
       */
      BlockVector &
      operator=(const BlockVector &V);

      /**
       * Reinitialize the BlockVector to contain @p n_blocks blocks of size
       * @p block_size, each of which stores @p local_size elements locally.
       * The @p communicator argument denotes the MPI communicator over which
       * each of these blocks shall communicate.
       *
       * If <tt>omit_zeroing_entries==false</tt>, the vector is filled with
       * zeros.
       */
      void
      reinit(const unsigned int n_blocks,
             const MPI_Comm &   communicator,
             const size_type    block_size,
             const size_type    local_size,
             const bool         omit_zeroing_entries = false);

      /**
       * Reinitialize the BlockVector such that it contains
       * <tt>block_sizes.size()</tt> blocks. Each block is reinitialized to
       * dimension <tt>block_sizes[i]</tt>. Each of them stores
       * <tt>local_sizes[i]</tt> elements on the present process.
       *
       * If the number of blocks is the same as before this function was
       * called, all vectors remain the same and reinit() is called for each
       * vector.
       *
       * If <tt>omit_zeroing_entries==false</tt>, the vector is filled with
       * zeros.
       *
       * Note that you must call this function (or one of the other reinit()
       * functions), rather than calling the reinit() function of an
       * individual block, to allow the block vector to update its caches of
       * vector sizes. If you call reinit() on one of the blocks, then
       * subsequent actions on this object may yield unpredictable results
       * since they may be routed to the wrong block.
       */
      void
      reinit(const std::vector<size_type> &block_sizes,
             const MPI_Comm &              communicator,
             const std::vector<size_type> &local_sizes,
             const bool                    omit_zeroing_entries = false);

      /**
       * Change the dimension to that of the vector <tt>V</tt>. The same
       * applies as for the other reinit() function.
       *
       * The elements of <tt>V</tt> are not copied, i.e. this function is the
       * same as calling <tt>reinit(V.size(), omit_zeroing_entries)</tt>.
       *
       * Note that you must call this function (or one of the other reinit()
       * functions), rather than calling the reinit() function of an
       * individual block, to allow the block vector to update its caches of
       * vector sizes. If you call reinit() on one of the blocks, then
       * subsequent actions on this object may yield unpredictable results
       * since they may be routed to the wrong block.
       */
      void
      reinit(const BlockVector &V, const bool omit_zeroing_entries = false);

      /**
       * Reinitialize the BlockVector using IndexSets. See the constructor
       * with the same arguments for details.
       */
      void
      reinit(const std::vector<IndexSet> &parallel_partitioning,
             const MPI_Comm &             communicator);

      /**
       * Same as above, but include ghost entries.
       */
      void
      reinit(const std::vector<IndexSet> &parallel_partitioning,
             const std::vector<IndexSet> &ghost_entries,
             const MPI_Comm &             communicator);

      /**
       * Change the number of blocks to <tt>num_blocks</tt>. The individual
       * blocks will get initialized with zero size, so it is assumed that the
       * user resizes the individual blocks by herself in an appropriate way,
       * and calls <tt>collect_sizes()</tt> afterwards, as sketched below.
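       *
       * A minimal sketch of this usage pattern (the block sizes are made up,
       * and for brevity a single-process run is assumed so that local and
       * global sizes coincide):
       * @code
       * PETScWrappers::MPI::BlockVector v;
       * v.reinit(2);                                // two blocks of size zero
       * v.block(0).reinit(MPI_COMM_WORLD, 50, 50);  // resize each block...
       * v.block(1).reinit(MPI_COMM_WORLD, 25, 25);
       * v.collect_sizes();                          // ...and update the caches
       * @endcode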
       */
      void
      reinit(const unsigned int num_blocks);

      /**
       * Return whether this vector is a ghosted vector (and thus read-only).
       */
      bool
      has_ghost_elements() const;

      /**
       * Return a reference to the MPI communicator object in use with this
       * vector.
       */
      const MPI_Comm &
      get_mpi_communicator() const;

      /**
       * Swap the contents of this vector and the other vector <tt>v</tt>. One
       * could do this operation with a temporary variable and copying over
       * the data elements, but this function is significantly more efficient
       * since it only swaps the pointers to the data of the two vectors and
       * therefore does not need to allocate temporary storage and move data
       * around.
       *
       * Limitation: right now this function only works if both vectors have
       * the same number of blocks. If needed, the numbers of blocks should be
       * exchanged, too.
       *
       * This function is analogous to the swap() function of all C++
       * standard containers. Also, there is a global function swap(u,v) that
       * simply calls <tt>u.swap(v)</tt>, again in analogy to standard
       * functions.
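       *
       * A short sketch, assuming two already initialized block vectors
       * @p u and @p v with the same block structure:
       * @code
       * u.swap(v);  // u now holds the former contents of v, and vice versa
       * swap(u, v); // equivalently, via the global function
       * @endcode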
       */
      void
      swap(BlockVector &v);

      /**
       * Print to a stream.
       */
      void
      print(std::ostream &     out,
            const unsigned int precision  = 3,
            const bool         scientific = true,
            const bool         across     = true) const;

      /**
       * Exception
       */
      DeclException0(ExcIteratorRangeDoesNotMatchVectorSize);
      /**
       * Exception
       */
      DeclException0(ExcNonMatchingBlockVectors);
    };

    /*@}*/

    /*--------------------- Inline functions --------------------------------*/

    inline BlockVector::BlockVector(const unsigned int n_blocks,
                                    const MPI_Comm &   communicator,
                                    const size_type    block_size,
                                    const size_type    local_size)
    {
      reinit(n_blocks, communicator, block_size, local_size);
    }



    inline BlockVector::BlockVector(
      const std::vector<size_type> &block_sizes,
      const MPI_Comm &              communicator,
      const std::vector<size_type> &local_elements)
    {
      reinit(block_sizes, communicator, local_elements, false);
    }


    inline BlockVector::BlockVector(const BlockVector &v)
      : BlockVectorBase<Vector>()
    {
      this->components.resize(v.n_blocks());
      this->block_indices = v.block_indices;

      for (unsigned int i = 0; i < this->n_blocks(); ++i)
        this->components[i] = v.components[i];
    }

    inline BlockVector::BlockVector(
      const std::vector<IndexSet> &parallel_partitioning,
      const MPI_Comm &             communicator)
    {
      reinit(parallel_partitioning, communicator);
    }

    inline BlockVector::BlockVector(
      const std::vector<IndexSet> &parallel_partitioning,
      const std::vector<IndexSet> &ghost_indices,
      const MPI_Comm &             communicator)
    {
      reinit(parallel_partitioning, ghost_indices, communicator);
    }

    inline BlockVector &
    BlockVector::operator=(const value_type s)
    {
      BaseClass::operator=(s);
      return *this;
    }

    inline BlockVector &
    BlockVector::operator=(const BlockVector &v)
    {
      // we only allow assignment to vectors with the same number of blocks
      // or to an empty BlockVector
      Assert(n_blocks() == 0 || n_blocks() == v.n_blocks(),
             ExcDimensionMismatch(n_blocks(), v.n_blocks()));

      if (this->n_blocks() != v.n_blocks())
        reinit(v.n_blocks());

      for (size_type i = 0; i < this->n_blocks(); ++i)
        this->components[i] = v.block(i);

      collect_sizes();

      return *this;
    }



    inline void
    BlockVector::reinit(const unsigned int n_blocks,
                        const MPI_Comm &   communicator,
                        const size_type    block_size,
                        const size_type    local_size,
                        const bool         omit_zeroing_entries)
    {
      reinit(std::vector<size_type>(n_blocks, block_size),
             communicator,
             std::vector<size_type>(n_blocks, local_size),
             omit_zeroing_entries);
    }



    inline void
    BlockVector::reinit(const std::vector<size_type> &block_sizes,
                        const MPI_Comm &              communicator,
                        const std::vector<size_type> &local_sizes,
                        const bool                    omit_zeroing_entries)
    {
      this->block_indices.reinit(block_sizes);
      if (this->components.size() != this->n_blocks())
        this->components.resize(this->n_blocks());

      for (unsigned int i = 0; i < this->n_blocks(); ++i)
        this->components[i].reinit(communicator,
                                   block_sizes[i],
                                   local_sizes[i],
                                   omit_zeroing_entries);
    }


    inline void
    BlockVector::reinit(const BlockVector &v, const bool omit_zeroing_entries)
    {
      this->block_indices = v.get_block_indices();
      if (this->components.size() != this->n_blocks())
        this->components.resize(this->n_blocks());

      for (unsigned int i = 0; i < this->n_blocks(); ++i)
        block(i).reinit(v.block(i), omit_zeroing_entries);
    }

    inline void
    BlockVector::reinit(const std::vector<IndexSet> &parallel_partitioning,
                        const MPI_Comm &             communicator)
    {
      std::vector<size_type> sizes(parallel_partitioning.size());
      for (unsigned int i = 0; i < parallel_partitioning.size(); ++i)
        sizes[i] = parallel_partitioning[i].size();

      this->block_indices.reinit(sizes);
      if (this->components.size() != this->n_blocks())
        this->components.resize(this->n_blocks());

      for (unsigned int i = 0; i < this->n_blocks(); ++i)
        block(i).reinit(parallel_partitioning[i], communicator);
    }

    inline void
    BlockVector::reinit(const std::vector<IndexSet> &parallel_partitioning,
                        const std::vector<IndexSet> &ghost_entries,
                        const MPI_Comm &             communicator)
    {
      std::vector<types::global_dof_index> sizes(parallel_partitioning.size());
      for (unsigned int i = 0; i < parallel_partitioning.size(); ++i)
        sizes[i] = parallel_partitioning[i].size();

      this->block_indices.reinit(sizes);
      if (this->components.size() != this->n_blocks())
        this->components.resize(this->n_blocks());

      for (unsigned int i = 0; i < this->n_blocks(); ++i)
        block(i).reinit(parallel_partitioning[i],
                        ghost_entries[i],
                        communicator);
    }



    inline const MPI_Comm &
    BlockVector::get_mpi_communicator() const
    {
      return block(0).get_mpi_communicator();
    }

    inline bool
    BlockVector::has_ghost_elements() const
    {
      bool ghosted = block(0).has_ghost_elements();
#  ifdef DEBUG
      for (unsigned int i = 0; i < this->n_blocks(); ++i)
        Assert(block(i).has_ghost_elements() == ghosted, ExcInternalError());
#  endif
      return ghosted;
    }


    inline void
    BlockVector::swap(BlockVector &v)
    {
      std::swap(this->components, v.components);

      ::dealii::swap(this->block_indices, v.block_indices);
    }



    inline void
    BlockVector::print(std::ostream &     out,
                       const unsigned int precision,
                       const bool         scientific,
                       const bool         across) const
    {
      for (unsigned int i = 0; i < this->n_blocks(); ++i)
        {
          if (across)
            out << 'C' << i << ':';
          else
            out << "Component " << i << std::endl;
          this->components[i].print(out, precision, scientific, across);
        }
    }



    /**
     * Global function which overloads the default implementation of the C++
     * standard library, which uses a temporary object. The function simply
     * exchanges the data of the two vectors.
     *
     * @relatesalso PETScWrappers::MPI::BlockVector
     */
    inline void
    swap(BlockVector &u, BlockVector &v)
    {
      u.swap(v);
    }

  } // namespace MPI

} // namespace PETScWrappers

namespace internal
{
  namespace LinearOperatorImplementation
  {
    template <typename>
    class ReinitHelper;

    /**
     * A helper class used internally in linear_operator.h. Specialization for
     * PETScWrappers::MPI::BlockVector.
     */
    template <>
    class ReinitHelper<PETScWrappers::MPI::BlockVector>
    {
    public:
      template <typename Matrix>
      static void
      reinit_range_vector(const Matrix &                   matrix,
                          PETScWrappers::MPI::BlockVector &v,
                          bool /*omit_zeroing_entries*/)
      {
        v.reinit(matrix.locally_owned_range_indices(),
                 matrix.get_mpi_communicator());
      }

      template <typename Matrix>
      static void
      reinit_domain_vector(const Matrix &                   matrix,
                           PETScWrappers::MPI::BlockVector &v,
                           bool /*omit_zeroing_entries*/)
      {
        v.reinit(matrix.locally_owned_domain_indices(),
                 matrix.get_mpi_communicator());
      }
    };

  } // namespace LinearOperatorImplementation
} /* namespace internal */


/**
 * Declare dealii::PETScWrappers::MPI::BlockVector as a distributed vector.
 */
template <>
struct is_serial_vector<PETScWrappers::MPI::BlockVector> : std::false_type
{};


DEAL_II_NAMESPACE_CLOSE

#endif // DEAL_II_WITH_PETSC

#endif