// ---------------------------------------------------------------------
//
// Copyright (C) 2004 - 2019 by the deal.II authors
//
// This file is part of the deal.II library.
//
// The deal.II library is free software; you can use it, redistribute
// it, and/or modify it under the terms of the GNU Lesser General
// Public License as published by the Free Software Foundation; either
// version 2.1 of the License, or (at your option) any later version.
// The full text of the license can be found in the file LICENSE.md at
// the top level directory of deal.II.
//
// ---------------------------------------------------------------------

#ifndef dealii_petsc_sparse_matrix_h
#  define dealii_petsc_sparse_matrix_h


#  include <deal.II/base/config.h>

#  ifdef DEAL_II_WITH_PETSC

#    include <deal.II/lac/exceptions.h>
#    include <deal.II/lac/petsc_matrix_base.h>
#    include <deal.II/lac/petsc_vector.h>

#    include <vector>

DEAL_II_NAMESPACE_OPEN
// forward declaration
#    ifndef DOXYGEN
template <typename MatrixType>
class BlockMatrixBase;
#    endif

namespace PETScWrappers
{
  /**
   * Implementation of a sequential sparse matrix class based on PETSc. All
   * the functionality is actually in the base class, except for the calls to
   * generate a sequential sparse matrix. This is possible since PETSc only
   * works on an abstract matrix type and internally distributes to functions
   * that do the actual work depending on the actual matrix type (much like
   * using virtual functions). Only the functions creating a matrix of
   * specific type differ, and are implemented in this particular class.
   *
   * @ingroup PETScWrappers
   * @ingroup Matrix1
   */
  class SparseMatrix : public MatrixBase
  {
  public:
    /**
     * A structure that describes some of the traits of this class in terms of
     * its run-time behavior. Some other classes (such as the block matrix
     * classes) that take one or the other of the matrix classes as their
     * template parameters can tune their behavior based on the variables in
     * this class.
     */
    struct Traits
    {
      /**
       * It is safe to elide additions of zeros to individual elements of this
       * matrix.
       */
      static const bool zero_addition_can_be_elided = true;
    };

    /**
     * Default constructor. Create an empty matrix.
     */
    SparseMatrix();

    /**
     * Create a sparse matrix of dimensions @p m times @p n, with an initial
     * guess of @p n_nonzero_per_row nonzero elements per row. PETSc is able
     * to cope with the situation that more than this number of elements is
     * later allocated for a row, but this involves copying data, and is thus
     * expensive.
     *
     * The @p is_symmetric flag determines whether we should tell PETSc that
     * the matrix is going to be symmetric (as indicated by the call
     * <tt>MatSetOption(mat, MAT_SYMMETRIC)</tt>). Note that the PETSc
     * documentation states that one cannot form an ILU decomposition of a
     * matrix for which this flag has been set to @p true, only an ICC. The
     * default value of this flag is @p false.
     */
    SparseMatrix(const size_type m,
                 const size_type n,
                 const size_type n_nonzero_per_row,
                 const bool      is_symmetric = false);

    /**
     * Initialize a rectangular matrix with @p m rows and @p n columns. The
     * maximal number of nonzero entries for each row separately is given by
     * the @p row_lengths array.
     *
     * Just as for the other constructors: PETSc is able to cope with the
     * situation that more than this number of elements is later allocated for
     * a row, but this involves copying data, and is thus expensive.
     *
     * The @p is_symmetric flag determines whether we should tell PETSc that
     * the matrix is going to be symmetric (as indicated by the call
     * <tt>MatSetOption(mat, MAT_SYMMETRIC)</tt>). Note that the PETSc
     * documentation states that one cannot form an ILU decomposition of a
     * matrix for which this flag has been set to @p true, only an ICC. The
     * default value of this flag is @p false.
     */
    SparseMatrix(const size_type               m,
                 const size_type               n,
                 const std::vector<size_type> &row_lengths,
                 const bool                    is_symmetric = false);

    /**
     * Initialize a sparse matrix using the given sparsity pattern.
     *
     * Note that PETSc can be very slow if you do not provide it with a good
     * estimate of the lengths of rows. Using the present function is a very
     * efficient way to do this, as it uses the exact number of nonzero
     * entries for each row of the matrix, as provided by the given sparsity
     * pattern argument. If the @p preset_nonzero_locations flag is @p true,
     * this function not only sets the correct row sizes up front, but also
     * pre-allocates the correct nonzero entries in the matrix.
     *
     * PETSc allows one to later add additional nonzero entries to a matrix
     * by simply writing to these elements. However, this will then lead to
     * additional memory allocations which are very inefficient and will
     * greatly slow down your program. It is therefore significantly more
     * efficient to get memory allocation right from the start.
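     *
     * As a minimal usage sketch (the names <code>pattern</code> and
     * <code>matrix</code> are only made up for this example), one would
     * first build a sparsity pattern and then hand it to this constructor so
     * that PETSc can allocate all nonzero entries up front:
     * @code
     *   SparsityPattern pattern(5, 5, 3);
     *   for (unsigned int i = 0; i < 5; ++i)
     *     {
     *       pattern.add(i, i);
     *       if (i > 0)
     *         pattern.add(i, i - 1);
     *       if (i < 4)
     *         pattern.add(i, i + 1);
     *     }
     *   pattern.compress();
     *
     *   PETScWrappers::SparseMatrix matrix(pattern);
     *   matrix.set(0, 0, 2.0);
     *   matrix.compress(VectorOperation::insert);
     * @endcode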
     */
    template <typename SparsityPatternType>
    explicit SparseMatrix(const SparsityPatternType &sparsity_pattern,
                          const bool preset_nonzero_locations = true);

    /**
     * This operator assigns a scalar to a matrix. Since this usually does not
     * make much sense (should we set all matrix entries to this value? Only
     * the nonzero entries of the sparsity pattern?), this operation is only
     * allowed if the actual value to be assigned is zero. This operator only
     * exists to allow for the obvious notation <tt>matrix=0</tt>, which sets
     * all elements of the matrix to zero, but keeps the sparsity pattern
     * previously used.
     */
    SparseMatrix &
    operator=(const double d);

    /**
     * The copy constructor is deleted.
     */
    SparseMatrix(const SparseMatrix &) = delete;

    /**
     * The copy assignment operator is deleted.
     */
    SparseMatrix &
    operator=(const SparseMatrix &) = delete;

    /**
     * Throw away the present matrix and generate one that has the same
     * properties as if it were created by the constructor of this class with
     * the same argument list as the present function.
     */
    void
    reinit(const size_type m,
           const size_type n,
           const size_type n_nonzero_per_row,
           const bool      is_symmetric = false);

    /**
     * Throw away the present matrix and generate one that has the same
     * properties as if it were created by the constructor of this class with
     * the same argument list as the present function.
     */
    void
    reinit(const size_type               m,
           const size_type               n,
           const std::vector<size_type> &row_lengths,
           const bool                    is_symmetric = false);

    /**
     * Initialize a sparse matrix using the given sparsity pattern.
     *
     * Note that PETSc can be very slow if you do not provide it with a good
     * estimate of the lengths of rows. Using the present function is a very
     * efficient way to do this, as it uses the exact number of nonzero
     * entries for each row of the matrix, as provided by the given sparsity
     * pattern argument. If the @p preset_nonzero_locations flag is @p true,
     * this function not only sets the correct row sizes up front, but also
     * pre-allocates the correct nonzero entries in the matrix.
     *
     * PETSc allows one to later add additional nonzero entries to a matrix
     * by simply writing to these elements. However, this will then lead to
     * additional memory allocations which are very inefficient and will
     * greatly slow down your program. It is therefore significantly more
     * efficient to get memory allocation right from the start.
     *
     * Despite the fact that it would seem to be an obvious win, setting the
     * @p preset_nonzero_locations flag to @p true does not seem to
     * accelerate programs; on the contrary, it has been observed to slow
     * entire programs down somewhat. This is surprising, since we can use
     * efficient function calls into PETSc that allow creating multiple
     * entries at once; given these observations, you may want to time both
     * settings for your own program. Note that the default value of this
     * flag is @p true, matching the declaration of this function.
     */
    template <typename SparsityPatternType>
    void
    reinit(const SparsityPatternType &sparsity_pattern,
           const bool                 preset_nonzero_locations = true);

    /**
     * Return a reference to the MPI communicator object in use with this
     * matrix. Since this is a sequential matrix, it returns the MPI_COMM_SELF
     * communicator.
     */
    virtual const MPI_Comm &
    get_mpi_communicator() const override;

    /**
     * Return the number of rows of this matrix.
     */
    size_t
    m() const;

    /**
     * Return the number of columns of this matrix.
     */
    size_t
    n() const;

    /**
     * Perform the matrix-matrix multiplication $C = AB$, or,
     * $C = A \text{diag}(V) B$ given a compatible vector $V$.
     *
     * This function calls MatrixBase::mmult() to do the actual work.
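     *
     * For example, given matrices <code>A</code> and <code>B</code> and a
     * result matrix <code>C</code> (names made up for this sketch), the
     * product $C = AB$ could be computed as
     * @code
     *   A.mmult(C, B); // C is overwritten by the product A*B
     * @endcode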
     */
    void
    mmult(SparseMatrix &      C,
          const SparseMatrix &B,
          const MPI::Vector & V = MPI::Vector()) const;

    /**
     * Perform the matrix-matrix multiplication with the transpose of
     * <tt>this</tt>, i.e., $C = A^T B$, or,
     * $C = A^T \text{diag}(V) B$ given a compatible vector $V$.
     *
     * This function calls MatrixBase::Tmmult() to do the actual work.
     */
    void
    Tmmult(SparseMatrix &      C,
           const SparseMatrix &B,
           const MPI::Vector & V = MPI::Vector()) const;

  private:
    /**
     * Do the actual work for the respective reinit() function and the
     * matching constructor, i.e. create a matrix. Getting rid of the previous
     * matrix is left to the caller.
     */
    void
    do_reinit(const size_type m,
              const size_type n,
              const size_type n_nonzero_per_row,
              const bool      is_symmetric = false);

    /**
     * Same as previous function.
     */
    void
    do_reinit(const size_type               m,
              const size_type               n,
              const std::vector<size_type> &row_lengths,
              const bool                    is_symmetric = false);

    /**
     * Same as previous function.
     */
    template <typename SparsityPatternType>
    void
    do_reinit(const SparsityPatternType &sparsity_pattern,
              const bool                 preset_nonzero_locations);

    // To allow calling protected prepare_add() and prepare_set().
    friend class BlockMatrixBase<SparseMatrix>;
  };

  namespace MPI
  {
    /**
     * Implementation of a parallel sparse matrix class based on PETSc, with
     * rows of the matrix distributed across an MPI network. All the
     * functionality is actually in the base class, except for the calls to
     * generate a parallel sparse matrix. This is possible since PETSc only
     * works on an abstract matrix type and internally distributes to
     * functions that do the actual work depending on the actual matrix type
     * (much like using virtual functions). Only the functions creating a
     * matrix of specific type differ, and are implemented in this particular
     * class.
     *
     * There are a number of comments on the communication model as well as
     * access to individual elements in the documentation to the parallel
     * vector class. These comments apply here as well.
     *
     *
     * <h3>Partitioning of matrices</h3>
     *
     * PETSc partitions parallel matrices so that each MPI process "owns" a
     * certain number of rows (i.e. only this process stores the respective
     * entries in these rows). The number of rows each process owns has to be
     * passed to the constructors and reinit() functions via the argument @p
     * local_rows. The individual values passed as @p local_rows on all the
     * MPI processes of course have to add up to the global number of rows of
     * the matrix.
     *
     * In addition to this, PETSc also partitions the rectangular chunk of the
     * matrix it owns (i.e. the @p local_rows times n() elements in the
     * matrix), so that matrix-vector multiplications can be performed
     * efficiently. This column-partitioning therefore has to match the
     * partitioning of the vectors with which the matrix is multiplied, just
     * as the row-partitioning has to match the partitioning of destination
     * vectors. This partitioning is passed to the constructors and reinit()
     * functions through the @p local_columns variable, which again has to add
     * up to the global number of columns in the matrix. The name @p
     * local_columns may seem inappropriate, since it does not imply that only
     * these columns are stored locally; rather, it reflects the fact that
     * these are the columns for which the elements of incoming vectors are
     * stored locally.
     *
     * To make things even more complicated, PETSc needs a very good estimate
     * of the number of elements to be stored in each row to be efficient.
     * Otherwise it spends most of the time allocating small chunks of
     * memory, a process that can slow down programs to a crawl if it happens
     * too often. As if a good estimate of the number of entries per row were
     * not hard enough to come by, PETSc even needs this estimate split up as
     * follows: for each row it owns, it needs an estimate for the number of
     * elements in this row that fall into the columns that are set apart for
     * this process (see above), and the number of elements that are in the
     * rest of the columns.
     *
     * Since in general this information is not readily available, most of the
     * initializing functions of this class assume that all of the entries per
     * row you give as an argument to @p n_nonzero_per_row or through @p
     * row_lengths fall into the columns "owned" by this process, and none
     * into the other ones. This is a fair guess for most of the rows, since
     * in a good domain partitioning, nodes only interact with nodes that are
     * within the same subdomain. It does not hold for nodes on the interfaces
     * of subdomains, however, and for the rows corresponding to these nodes,
     * PETSc will have to allocate additional memory, a costly process.
     *
     * The only way to avoid this is to tell PETSc where the actual entries of
     * the matrix will be. For this, there are constructors and reinit()
     * functions of this class that take a DynamicSparsityPattern object
     * containing all this information. While in the general case it is
     * sufficient if the constructors and reinit() functions know the number
     * of local rows and columns, the functions getting a sparsity pattern
     * also need to know the number of local rows (@p local_rows_per_process)
     * and columns (@p local_columns_per_process) for all other processes, in
     * order to compute which parts of the matrix are which. Thus, it is not
     * sufficient to just count the number of degrees of freedom that belong
     * to a particular process, but you have to have the numbers for all
     * processes available at all processes.
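     *
     * As a minimal sketch (all variable names here are made up for the
     * example), a square parallel matrix with <code>n_dofs</code> global
     * rows and columns might be set up as follows:
     * @code
     *   const unsigned int this_process =
     *     Utilities::MPI::this_mpi_process(MPI_COMM_WORLD);
     *   const unsigned int n_processes =
     *     Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
     *
     *   // Rows and columns owned by each process. Each of these vectors
     *   // has to add up to the global number of rows/columns.
     *   std::vector<types::global_dof_index> local_rows_per_process(n_processes);
     *   std::vector<types::global_dof_index> local_columns_per_process(n_processes);
     *   ...  // fill the two vectors with the chosen partitioning
     *
     *   DynamicSparsityPattern dsp(n_dofs, n_dofs);
     *   ...  // add the nonzero entries of the locally owned rows
     *
     *   PETScWrappers::MPI::SparseMatrix matrix;
     *   matrix.reinit(MPI_COMM_WORLD,
     *                 dsp,
     *                 local_rows_per_process,
     *                 local_columns_per_process,
     *                 this_process);
     * @endcode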
     *
     * @ingroup PETScWrappers
     * @ingroup Matrix1
     */
    class SparseMatrix : public MatrixBase
    {
    public:
      /**
       * Declare type for container size.
       */
      using size_type = types::global_dof_index;

      /**
       * A structure that describes some of the traits of this class in terms
       * of its run-time behavior. Some other classes (such as the block
       * matrix classes) that take one or the other of the matrix classes as
       * their template parameters can tune their behavior based on the
       * variables in this class.
       */
      struct Traits
      {
        /**
         * It is not safe to elide additions of zeros to individual elements
         * of this matrix. The reason is that additions to the matrix may
         * trigger collective operations synchronizing buffers on multiple
         * processes. If an addition is elided on one process, this may lead
         * to other processes hanging in an infinite waiting loop.
         */
        static const bool zero_addition_can_be_elided = false;
      };

      /**
       * Default constructor. Create an empty matrix.
       */
      SparseMatrix();

      /**
       * Destructor to free the PETSc object.
       */
      ~SparseMatrix() override;

      /**
       * Create a sparse matrix of dimensions @p m times @p n, with an initial
       * guess of @p n_nonzero_per_row and @p n_offdiag_nonzero_per_row
       * nonzero elements per row (see documentation of the MatCreateAIJ PETSc
       * function for more information about these parameters). PETSc is able
       * to cope with the situation that more than this number of elements are
       * later allocated for a row, but this involves copying data, and is
       * thus expensive.
       *
       * For the meaning of the @p local_rows and @p local_columns parameters,
       * see the class documentation.
       *
       * The @p is_symmetric flag determines whether we should tell PETSc that
       * the matrix is going to be symmetric (as indicated by the call
       * <tt>MatSetOption(mat, MAT_SYMMETRIC)</tt>). Note that the PETSc
       * documentation states that one cannot form an ILU decomposition of a
       * matrix for which this flag has been set to @p true, only an ICC. The
       * default value of this flag is @p false.
       *
       * @deprecated This constructor is deprecated: please use the
       * constructor with a sparsity pattern argument instead.
       */
      DEAL_II_DEPRECATED
      SparseMatrix(const MPI_Comm &communicator,
                   const size_type m,
                   const size_type n,
                   const size_type local_rows,
                   const size_type local_columns,
                   const size_type n_nonzero_per_row,
                   const bool      is_symmetric              = false,
                   const size_type n_offdiag_nonzero_per_row = 0);

      /**
       * Initialize a rectangular matrix with @p m rows and @p n columns. The
       * maximal number of nonzero entries for the diagonal and off-diagonal
       * blocks of each row is given by the @p row_lengths and @p
       * offdiag_row_lengths arrays.
       *
       * For the meaning of the @p local_rows and @p local_columns parameters,
       * see the class documentation.
       *
       * Just as for the other constructors: PETSc is able to cope with the
       * situation that more than this number of elements are later allocated
       * for a row, but this involves copying data, and is thus expensive.
       *
       * The @p is_symmetric flag determines whether we should tell PETSc that
       * the matrix is going to be symmetric (as indicated by the call
       * <tt>MatSetOption(mat, MAT_SYMMETRIC)</tt>). Note that the PETSc
       * documentation states that one cannot form an ILU decomposition of a
       * matrix for which this flag has been set to @p true, only an ICC. The
       * default value of this flag is @p false.
       *
       * @deprecated This constructor is deprecated: please use the
       * constructor with a sparsity pattern argument instead.
       */
      DEAL_II_DEPRECATED
      SparseMatrix(const MPI_Comm &              communicator,
                   const size_type               m,
                   const size_type               n,
                   const size_type               local_rows,
                   const size_type               local_columns,
                   const std::vector<size_type> &row_lengths,
                   const bool                    is_symmetric = false,
                   const std::vector<size_type> &offdiag_row_lengths =
                     std::vector<size_type>());

      /**
       * Initialize using the given sparsity pattern with communication
       * happening over the provided @p communicator.
       *
       * For the meaning of the @p local_rows_per_process and @p
       * local_columns_per_process parameters, see the class documentation.
       *
       * Note that PETSc can be very slow if you do not provide it with a good
       * estimate of the lengths of rows. Using the present function is a very
       * efficient way to do this, as it uses the exact number of nonzero
       * entries for each row of the matrix, as provided by the given sparsity
       * pattern argument. If the @p preset_nonzero_locations flag is @p true,
       * this function not only sets the correct row sizes up front, but also
       * pre-allocates the correct nonzero entries in the matrix.
       *
       * PETSc allows one to later add additional nonzero entries to a matrix
       * by simply writing to these elements. However, this will then lead to
       * additional memory allocations which are very inefficient and will
       * greatly slow down your program. It is therefore significantly more
       * efficient to get memory allocation right from the start.
       */
      template <typename SparsityPatternType>
      SparseMatrix(const MPI_Comm &              communicator,
                   const SparsityPatternType &   sparsity_pattern,
                   const std::vector<size_type> &local_rows_per_process,
                   const std::vector<size_type> &local_columns_per_process,
                   const unsigned int            this_process,
                   const bool preset_nonzero_locations = true);

      /**
       * This operator assigns a scalar to a matrix. Since this usually does
       * not make much sense (should we set all matrix entries to this value?
       * Only the nonzero entries of the sparsity pattern?), this operation is
       * only allowed if the actual value to be assigned is zero. This
       * operator only exists to allow for the obvious notation
       * <tt>matrix=0</tt>, which sets all elements of the matrix to zero, but
       * keeps the sparsity pattern previously used.
       */
      SparseMatrix &
      operator=(const value_type d);


      /**
       * Make a copy of the PETSc matrix @p other. It is assumed that both
       * matrices have the same SparsityPattern.
       */
      void
      copy_from(const SparseMatrix &other);

      /**
       * Throw away the present matrix and generate one that has the same
       * properties as if it were created by the constructor of this class
       * with the same argument list as the present function.
       *
       * @deprecated This overload of <code>reinit</code> is deprecated:
       * please use the overload with a sparsity pattern argument instead.
       */
      DEAL_II_DEPRECATED
      void
      reinit(const MPI_Comm &communicator,
             const size_type m,
             const size_type n,
             const size_type local_rows,
             const size_type local_columns,
             const size_type n_nonzero_per_row,
             const bool      is_symmetric              = false,
             const size_type n_offdiag_nonzero_per_row = 0);

      /**
       * Throw away the present matrix and generate one that has the same
       * properties as if it were created by the constructor of this class
       * with the same argument list as the present function.
       *
       * @deprecated This overload of <code>reinit</code> is deprecated:
       * please use the overload with a sparsity pattern argument instead.
       */
      DEAL_II_DEPRECATED
      void
      reinit(const MPI_Comm &              communicator,
             const size_type               m,
             const size_type               n,
             const size_type               local_rows,
             const size_type               local_columns,
             const std::vector<size_type> &row_lengths,
             const bool                    is_symmetric = false,
             const std::vector<size_type> &offdiag_row_lengths =
               std::vector<size_type>());

      /**
       * Initialize using the given sparsity pattern with communication
       * happening over the provided @p communicator.
       *
       * Note that PETSc can be very slow if you do not provide it with a good
       * estimate of the lengths of rows. Using the present function is a very
       * efficient way to do this, as it uses the exact number of nonzero
       * entries for each row of the matrix, as provided by the given sparsity
       * pattern argument. If the @p preset_nonzero_locations flag is @p true,
       * this function not only sets the correct row sizes up front, but also
       * pre-allocates the correct nonzero entries in the matrix.
       *
       * PETSc allows one to later add additional nonzero entries to a matrix
       * by simply writing to these elements. However, this will then lead to
       * additional memory allocations which are very inefficient and will
       * greatly slow down your program. It is therefore significantly more
       * efficient to get memory allocation right from the start.
       */
      template <typename SparsityPatternType>
      void
      reinit(const MPI_Comm &              communicator,
             const SparsityPatternType &   sparsity_pattern,
             const std::vector<size_type> &local_rows_per_process,
             const std::vector<size_type> &local_columns_per_process,
             const unsigned int            this_process,
             const bool                    preset_nonzero_locations = true);

      /**
       * Create a matrix where the sizes of the IndexSets determine the
       * global number of rows and columns, and the entries of the IndexSets
       * give the rows and columns owned by the calling processor. Note that
       * only ascending, 1:1 IndexSets are supported.
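       *
       * A minimal sketch (the variable names are made up for this example),
       * assuming every process owns a contiguous block of rows of a square
       * matrix with <code>n_dofs</code> global rows:
       * @code
       *   IndexSet locally_owned_rows(n_dofs);
       *   locally_owned_rows.add_range(my_first_row, my_last_row + 1);
       *   locally_owned_rows.compress();
       *
       *   DynamicSparsityPattern dsp(n_dofs, n_dofs);
       *   ...  // add the nonzero entries of the locally owned rows
       *
       *   PETScWrappers::MPI::SparseMatrix matrix;
       *   matrix.reinit(locally_owned_rows,
       *                 locally_owned_rows,
       *                 dsp,
       *                 MPI_COMM_WORLD);
       * @endcode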
       */
      template <typename SparsityPatternType>
      void
      reinit(const IndexSet &           local_rows,
             const IndexSet &           local_columns,
             const SparsityPatternType &sparsity_pattern,
             const MPI_Comm &           communicator);

      /**
       * Initialize this matrix to have the same structure as @p other. This
       * will not copy the values of the other matrix, but you can use
       * copy_from() for this.
       */
      void
      reinit(const SparseMatrix &other);

      /**
       * Return a reference to the MPI communicator object in use with this
       * matrix.
       */
      virtual const MPI_Comm &
      get_mpi_communicator() const override;

      /**
       * @addtogroup Exceptions
       * @{
       */
      /**
       * Exception
       */
      DeclException2(ExcLocalRowsTooLarge,
                     int,
                     int,
                     << "The number of local rows " << arg1
                     << " must not be larger than the total number of rows "
                     << arg2);
      //@}

      /**
       * Return the square of the norm of the vector $v$ with respect to the
       * norm induced by this matrix, i.e. $\left(v^\ast,Mv\right)$. This is
       * useful, e.g., in the finite element context, where the $L_2$ norm of
       * a function equals the matrix norm with respect to the mass matrix of
       * the vector representing the nodal values of the finite element
       * function.
       *
       * Obviously, the matrix needs to be square for this operation.
       *
       * The implementation of this function is not as efficient as the one in
       * the @p MatrixBase class used in deal.II (i.e. the original one, not
       * the PETSc wrapper class) since PETSc doesn't support this operation
       * and needs a temporary vector.
       */
      PetscScalar
      matrix_norm_square(const Vector &v) const;

      /**
       * Compute the matrix scalar product $\left(u^\ast,Mv\right)$.
       *
       * The implementation of this function is not as efficient as the one in
       * the @p MatrixBase class used in deal.II (i.e. the original one, not
       * the PETSc wrapper class) since PETSc doesn't support this operation
       * and needs a temporary vector.
       */
      PetscScalar
      matrix_scalar_product(const Vector &u, const Vector &v) const;

      /**
       * Return the partitioning of the domain space of this matrix, i.e., the
       * partitioning of the vectors this matrix has to be multiplied with.
       */
      IndexSet
      locally_owned_domain_indices() const;

      /**
       * Return the partitioning of the range space of this matrix, i.e., the
       * partitioning of the vectors that result from matrix-vector
       * products.
       */
      IndexSet
      locally_owned_range_indices() const;

      /**
       * Perform the matrix-matrix multiplication $C = AB$, or,
       * $C = A \text{diag}(V) B$ given a compatible vector $V$.
       *
       * This function calls MatrixBase::mmult() to do the actual work.
       */
      void
      mmult(SparseMatrix &      C,
            const SparseMatrix &B,
            const MPI::Vector & V = MPI::Vector()) const;

      /**
       * Perform the matrix-matrix multiplication with the transpose of
       * <tt>this</tt>, i.e., $C = A^T B$, or,
       * $C = A^T \text{diag}(V) B$ given a compatible vector $V$.
       *
       * This function calls MatrixBase::Tmmult() to do the actual work.
       */
      void
      Tmmult(SparseMatrix &      C,
             const SparseMatrix &B,
             const MPI::Vector & V = MPI::Vector()) const;

    private:
      /**
       * Copy of the communicator object to be used for this parallel matrix.
       */
      MPI_Comm communicator;

      /**
       * Do the actual work for the respective reinit() function and the
       * matching constructor, i.e. create a matrix. Getting rid of the
       * previous matrix is left to the caller.
       *
       * @deprecated This overload of <code>do_reinit</code> is deprecated:
       * please use the overload with a sparsity pattern argument instead.
       */
      DEAL_II_DEPRECATED
      void
      do_reinit(const size_type m,
                const size_type n,
                const size_type local_rows,
                const size_type local_columns,
                const size_type n_nonzero_per_row,
                const bool      is_symmetric              = false,
                const size_type n_offdiag_nonzero_per_row = 0);

      /**
       * Same as previous function.
       *
       * @deprecated This overload of <code>do_reinit</code> is deprecated:
       * please use the overload with a sparsity pattern argument instead.
       */
      DEAL_II_DEPRECATED
      void
      do_reinit(const size_type               m,
                const size_type               n,
                const size_type               local_rows,
                const size_type               local_columns,
                const std::vector<size_type> &row_lengths,
                const bool                    is_symmetric = false,
                const std::vector<size_type> &offdiag_row_lengths =
                  std::vector<size_type>());

      /**
       * Same as previous functions.
       */
      template <typename SparsityPatternType>
      void
      do_reinit(const SparsityPatternType &   sparsity_pattern,
                const std::vector<size_type> &local_rows_per_process,
                const std::vector<size_type> &local_columns_per_process,
                const unsigned int            this_process,
                const bool                    preset_nonzero_locations);

      /**
       * Same as previous functions.
       */
      template <typename SparsityPatternType>
      void
      do_reinit(const IndexSet &           local_rows,
                const IndexSet &           local_columns,
                const SparsityPatternType &sparsity_pattern);

      // To allow calling protected prepare_add() and prepare_set().
      friend class BlockMatrixBase<SparseMatrix>;
    };



    // -------- template and inline functions ----------

    inline const MPI_Comm &
    SparseMatrix::get_mpi_communicator() const
    {
      return communicator;
    }
  } // namespace MPI
} // namespace PETScWrappers

DEAL_II_NAMESPACE_CLOSE

#  endif // DEAL_II_WITH_PETSC

#endif
/*--------------------------- petsc_sparse_matrix.h -------------------------*/