1 // ---------------------------------------------------------------------
2 //
3 // Copyright (C) 2004 - 2019 by the deal.II authors
4 //
5 // This file is part of the deal.II library.
6 //
7 // The deal.II library is free software; you can use it, redistribute
8 // it, and/or modify it under the terms of the GNU Lesser General
9 // Public License as published by the Free Software Foundation; either
10 // version 2.1 of the License, or (at your option) any later version.
11 // The full text of the license can be found in the file LICENSE.md at
12 // the top level directory of deal.II.
13 //
14 // ---------------------------------------------------------------------
15 
16 #ifndef dealii_petsc_matrix_base_h
17 #  define dealii_petsc_matrix_base_h
18 
19 
20 #  include <deal.II/base/config.h>
21 
22 #  ifdef DEAL_II_WITH_PETSC
23 
24 #    include <deal.II/base/subscriptor.h>
25 
26 #    include <deal.II/lac/exceptions.h>
27 #    include <deal.II/lac/full_matrix.h>
28 #    include <deal.II/lac/petsc_compatibility.h>
29 #    include <deal.II/lac/petsc_vector_base.h>
30 #    include <deal.II/lac/vector_operation.h>
31 
32 #    include <petscmat.h>
33 
34 #    include <cmath>
35 #    include <memory>
36 #    include <vector>
37 
38 DEAL_II_NAMESPACE_OPEN
39 
40 // Forward declarations
41 #    ifndef DOXYGEN
42 template <typename Matrix>
43 class BlockMatrixBase;
44 #    endif
45 
46 
47 namespace PETScWrappers
48 {
49   // forward declarations
50   class MatrixBase;
51 
52   namespace MatrixIterators
53   {
54     /**
55      * This class acts as an iterator walking over the elements of PETSc
56      * matrices. Since PETSc offers a uniform interface for all types of
57      * matrices, this iterator can be used to access both sparse and full
58      * matrices.
59      *
60      * Note that PETSc does not give any guarantees as to the order of
61      * elements within each row. Note also that accessing the elements of a
62      * full matrix surprisingly only shows the nonzero elements of the matrix,
63      * not all elements.
64      *
65      * @ingroup PETScWrappers
66      */
67     class const_iterator
68     {
69     private:
70       /**
71        * Accessor class for iterators
72        */
73       class Accessor
74       {
75       public:
76         /**
77          * Declare type for container size.
78          */
79         using size_type = types::global_dof_index;
80 
81         /**
82          * Constructor. Since we use accessors only for read access, a const
83          * matrix pointer is sufficient.
84          */
85         Accessor(const MatrixBase *matrix,
86                  const size_type   row,
87                  const size_type   index);
88 
89         /**
90          * Row number of the element represented by this object.
91          */
92         size_type
93         row() const;
94 
95         /**
96          * Index in row of the element represented by this object.
97          */
98         size_type
99         index() const;
100 
101         /**
102          * Column number of the element represented by this object.
103          */
104         size_type
105         column() const;
106 
107         /**
108          * Value of this matrix entry.
109          */
110         PetscScalar
111         value() const;
112 
113         /**
114          * Exception
115          */
116         DeclException0(ExcBeyondEndOfMatrix);
117         /**
118          * Exception
119          */
120         DeclException3(ExcAccessToNonlocalRow,
121                        int,
122                        int,
123                        int,
124                        << "You tried to access row " << arg1
125                        << " of a distributed matrix, but only rows " << arg2
126                        << " through " << arg3
127                        << " are stored locally and can be accessed.");
128 
129       private:
130         /**
131          * The matrix accessed.
132          */
133         mutable MatrixBase *matrix;
134 
135         /**
136          * Current row number.
137          */
138         size_type a_row;
139 
140         /**
141          * Current index in row.
142          */
143         size_type a_index;
144 
145         /**
146          * Cache where we store the column indices of the present row. This is
147          * necessary, since PETSc makes access to the elements of its matrices
148          * rather hard, and it is much more efficient to copy all column
149          * entries of a row once when we enter it than repeatedly asking PETSc
150          * for individual ones. This also makes some sense since it is likely
151          * that we will access them sequentially anyway.
152          *
153          * In order to make copying of iterators/accessors acceptably fast, we
154          * keep a shared pointer to these entries so that more
155          * than one accessor can access this data if necessary.
156          */
157         std::shared_ptr<const std::vector<size_type>> colnum_cache;
158 
159         /**
160          * Similar cache for the values of this row.
161          */
162         std::shared_ptr<const std::vector<PetscScalar>> value_cache;
163 
164         /**
165          * Discard the old row caches (they may still be used by other
166          * accessors) and generate new ones for the row pointed to presently
167          * by this accessor.
168          */
169         void
170         visit_present_row();
171 
172         // Make enclosing class a friend.
173         friend class const_iterator;
174       };
175 
176     public:
177       /**
178        * Declare type for container size.
179        */
180       using size_type = types::global_dof_index;
181 
182       /**
183        * Constructor. Create an iterator into the matrix @p matrix for the
184        * given row and the index within it.
185        */
186       const_iterator(const MatrixBase *matrix,
187                      const size_type   row,
188                      const size_type   index);
189 
190       /**
191        * Prefix increment.
192        */
193       const_iterator &
194       operator++();
195 
196       /**
197        * Postfix increment.
198        */
199       const_iterator
200       operator++(int);
201 
202       /**
203        * Dereferencing operator.
204        */
205       const Accessor &operator*() const;
206 
207       /**
208        * Dereferencing operator.
209        */
210       const Accessor *operator->() const;
211 
212       /**
213        * Comparison. True, if both iterators point to the same matrix
214        * position.
215        */
216       bool
217       operator==(const const_iterator &) const;
218       /**
219        * Inverse of <tt>==</tt>.
220        */
221       bool
222       operator!=(const const_iterator &) const;
223 
224       /**
225        * Comparison operator. Result is true if either the first row number is
226        * smaller or if the row numbers are equal and the first index is
227        * smaller.
228        */
229       bool
230       operator<(const const_iterator &) const;
231 
232       /**
233        * Exception
234        */
235       DeclException2(ExcInvalidIndexWithinRow,
236                      int,
237                      int,
238                      << "Attempt to access element " << arg2 << " of row "
239                      << arg1 << " which doesn't have that many elements.");
240 
241     private:
242       /**
243        * Store an object of the accessor class.
244        */
245       Accessor accessor;
246     };
247 
248   } // namespace MatrixIterators
249 
250 
251   /**
252    * Base class for all matrix classes that are implemented on top of the
253    * PETSc matrix types. Since in PETSc all matrix types (i.e. sequential and
254    * parallel, sparse, blocked, etc.)  are built by filling the contents of an
255    * abstract object that is only referenced through a pointer of a type that
256    * is independent of the actual matrix type, we can implement almost all
257    * functionality of matrices in this base class. Derived classes will then
258    * only have to provide the functionality to create one or the other kind of
259    * matrix.
260    *
261    * The interface of this class is modeled after the existing SparseMatrix
262    * class in deal.II. It has almost the same member functions, and is often
263    * exchangeable. However, since PETSc only supports a single scalar type
264    * (either double, float, or a complex data type), it is not templated, and
265    * only works with whatever data type your PETSc installation has defined
266    * PetscScalar to be.
267    *
268    * Note that PETSc only guarantees that operations do what you expect if the
269    * functions @p MatAssemblyBegin and @p MatAssemblyEnd have been called
270    * after matrix assembly. Therefore, you need to call
271    * SparseMatrix::compress() before you actually use the matrix. This also
272    * calls @p MatCompress that compresses the storage format for sparse
273    * matrices by discarding unused elements. PETSc allows you to continue
274    * assembling the matrix after calls to these functions, but since no more
275    * free entries are available afterwards, it is better to call
276    * SparseMatrix::compress() only once, at the end of the assembly stage and
277    * before the matrix is actively used.
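   *
   * As a minimal usage sketch (assuming a concrete derived class such as
   * PETScWrappers::SparseMatrix and purely illustrative sizes; the last
   * constructor argument is the expected number of nonzeros per row),
   * assembly followed by the required compress() call could look like this:
   * @code
   * PETScWrappers::SparseMatrix matrix(10, 10, 3);
   *
   * // write a simple tridiagonal stencil
   * for (unsigned int i = 0; i < 10; ++i)
   *   {
   *     matrix.set(i, i, 2.0);
   *     if (i > 0)
   *       matrix.set(i, i - 1, -1.0);
   *     if (i + 1 < 10)
   *       matrix.set(i, i + 1, -1.0);
   *   }
   *
   * // finalize all 'set' operations before the matrix is used
   * matrix.compress(VectorOperation::insert);
   * @endcode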
278    *
279    * @ingroup PETScWrappers
280    * @ingroup Matrix1
281    */
282   class MatrixBase : public Subscriptor
283   {
284   public:
285     /**
286      * Declare an alias for the iterator class.
287      */
288     using const_iterator = MatrixIterators::const_iterator;
289 
290     /**
291      * Declare type for container size.
292      */
293     using size_type = types::global_dof_index;
294 
295     /**
296      * Declare an alias in analogy to all the other container classes.
297      */
298     using value_type = PetscScalar;
299 
300     /**
301      * Default constructor.
302      */
303     MatrixBase();
304 
305     /**
306      * Copy constructor. It is deleted as copying this base class
307      * without knowing the concrete kind of matrix stored may both
308      * miss important details and be expensive if the matrix is large.
309      */
310     MatrixBase(const MatrixBase &) = delete;
311 
312     /**
313      * Copy operator. It is deleted as copying this base class
314      * without knowing the concrete kind of matrix stored may both
315      * miss important details and be expensive if the matrix is large.
316      */
317     MatrixBase &
318     operator=(const MatrixBase &) = delete;
319 
320     /**
321      * Destructor. Made virtual so that one can use pointers to this class.
322      */
323     virtual ~MatrixBase() override;
324 
325     /**
326      * This operator assigns a scalar to a matrix. Since this usually does not
327      * make much sense (should we set all matrix entries to this value? Only
328      * the nonzero entries of the sparsity pattern?), this operation is only
329      * allowed if the actual value to be assigned is zero. This operator only
330      * exists to allow for the obvious notation <tt>matrix=0</tt>, which sets
331      * all elements of the matrix to zero, but keeps the sparsity pattern
332      * previously used.
333      */
334     MatrixBase &
335     operator=(const value_type d);
336     /**
337      * Release all memory and return to a state just like after having called
338      * the default constructor.
339      */
340     void
341     clear();
342 
343     /**
344      * Set the element (<i>i,j</i>) to @p value.
345      *
346      * If the present object (from a derived class of this one) happens to be
347      * a sparse matrix, then this function adds a new entry to the matrix if
348      * it didn't exist before, very much in contrast to the SparseMatrix class
349      * which throws an error if the entry does not exist. If <tt>value</tt> is
350      * not a finite number an exception is thrown.
351      */
352     void
353     set(const size_type i, const size_type j, const PetscScalar value);
354 
355     /**
356      * Set all elements given in a FullMatrix<PetscScalar> into the sparse matrix
357      * locations given by <tt>indices</tt>. In other words, this function
358      * writes the elements in <tt>full_matrix</tt> into the calling matrix,
359      * using the local-to-global indexing specified by <tt>indices</tt> for
360      * both the rows and the columns of the matrix. This function assumes a
361      * quadratic sparse matrix and a quadratic full_matrix, the usual
362      * situation in FE calculations.
363      *
364      * If the present object (from a derived class of this one) happens to be
365      * a sparse matrix, then this function adds some new entries to the matrix
366      * if they didn't exist before, very much in contrast to the SparseMatrix
367      * class which throws an error if the entry does not exist.
368      *
369      * The optional parameter <tt>elide_zero_values</tt> can be used to
370      * specify whether zero values should be inserted anyway or they should be
371      * filtered away. The default value is <tt>false</tt>, i.e., even zero
372      * values are inserted/replaced.
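     *
     * A brief sketch of the typical use (here, <tt>dofs_per_cell</tt> and
     * the matrix object <tt>matrix</tt> of some derived class are assumed to
     * exist already):
     * @code
     * FullMatrix<PetscScalar>              cell_matrix(dofs_per_cell,
     *                                                  dofs_per_cell);
     * std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
     * // ... fill cell_matrix and local_dof_indices for the current cell ...
     *
     * // write the local block into the global matrix, using the
     * // local-to-global map given by local_dof_indices
     * matrix.set(local_dof_indices, cell_matrix);
     * @endcode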
373      */
374     void
375     set(const std::vector<size_type> & indices,
376         const FullMatrix<PetscScalar> &full_matrix,
377         const bool                     elide_zero_values = false);
378 
379     /**
380      * Same function as before, but now including the possibility to use
381      * rectangular full_matrices and different local-to-global indexing on
382      * rows and columns, respectively.
383      */
384     void
385     set(const std::vector<size_type> & row_indices,
386         const std::vector<size_type> & col_indices,
387         const FullMatrix<PetscScalar> &full_matrix,
388         const bool                     elide_zero_values = false);
389 
390     /**
391      * Set several elements in the specified row of the matrix with column
392      * indices as given by <tt>col_indices</tt> to the respective value.
393      *
394      * If the present object (from a derived class of this one) happens to be
395      * a sparse matrix, then this function adds some new entries to the matrix
396      * if they didn't exist before, very much in contrast to the SparseMatrix
397      * class which throws an error if the entry does not exist.
398      *
399      * The optional parameter <tt>elide_zero_values</tt> can be used to
400      * specify whether zero values should be inserted anyway or they should be
401      * filtered away. The default value is <tt>false</tt>, i.e., even zero
402      * values are inserted/replaced.
403      */
404     void
405     set(const size_type                 row,
406         const std::vector<size_type> &  col_indices,
407         const std::vector<PetscScalar> &values,
408         const bool                      elide_zero_values = false);
409 
410     /**
411      * Set several elements in a given row of the sparse matrix, namely those
412      * in the columns given by <tt>col_indices</tt>, to the values given by <tt>values</tt>.
413      *
414      * If the present object (from a derived class of this one) happens to be
415      * a sparse matrix, then this function adds some new entries to the matrix
416      * if they didn't exist before, very much in contrast to the SparseMatrix
417      * class which throws an error if the entry does not exist.
418      *
419      * The optional parameter <tt>elide_zero_values</tt> can be used to
420      * specify whether zero values should be inserted anyway or they should be
421      * filtered away. The default value is <tt>false</tt>, i.e., even zero
422      * values are inserted/replaced.
423      */
424     void
425     set(const size_type    row,
426         const size_type    n_cols,
427         const size_type *  col_indices,
428         const PetscScalar *values,
429         const bool         elide_zero_values = false);
430 
431     /**
432      * Add @p value to the element (<i>i,j</i>).
433      *
434      * If the present object (from a derived class of this one) happens to be
435      * a sparse matrix, then this function adds a new entry to the matrix if
436      * it didn't exist before, very much in contrast to the SparseMatrix class
437      * which throws an error if the entry does not exist. If <tt>value</tt> is
438      * not a finite number an exception is thrown.
439      */
440     void
441     add(const size_type i, const size_type j, const PetscScalar value);
442 
443     /**
444      * Add all elements given in a FullMatrix<PetscScalar> into the sparse matrix
445      * locations given by <tt>indices</tt>. In other words, this function adds
446      * the elements in <tt>full_matrix</tt> to the respective entries in
447      * calling matrix, using the local-to-global indexing specified by
448      * <tt>indices</tt> for both the rows and the columns of the matrix. This
449      * function assumes a quadratic sparse matrix and a quadratic full_matrix,
450      * the usual situation in FE calculations.
451      *
452      * If the present object (from a derived class of this one) happens to be
453      * a sparse matrix, then this function adds some new entries to the matrix
454      * if they didn't exist before, very much in contrast to the SparseMatrix
455      * class which throws an error if the entry does not exist.
456      *
457      * The optional parameter <tt>elide_zero_values</tt> can be used to
458      * specify whether zero values should be added anyway or these should be
459      * filtered away and only non-zero data is added. The default value is
460      * <tt>true</tt>, i.e., zero values won't be added into the matrix.
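     *
     * In a finite element program, one typically accumulates the local
     * contributions of all cells with this function, as in the following
     * sketch (which assumes that <tt>dof_handler</tt>,
     * <tt>system_matrix</tt>, <tt>cell_matrix</tt>, and
     * <tt>local_dof_indices</tt> have been set up elsewhere):
     * @code
     * for (const auto &cell : dof_handler.active_cell_iterators())
     *   {
     *     // ... integrate cell_matrix and fill local_dof_indices ...
     *     system_matrix.add(local_dof_indices, cell_matrix);
     *   }
     * system_matrix.compress(VectorOperation::add);
     * @endcode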
461      */
462     void
463     add(const std::vector<size_type> & indices,
464         const FullMatrix<PetscScalar> &full_matrix,
465         const bool                     elide_zero_values = true);
466 
467     /**
468      * Same function as before, but now including the possibility to use
469      * rectangular full_matrices and different local-to-global indexing on
470      * rows and columns, respectively.
471      */
472     void
473     add(const std::vector<size_type> & row_indices,
474         const std::vector<size_type> & col_indices,
475         const FullMatrix<PetscScalar> &full_matrix,
476         const bool                     elide_zero_values = true);
477 
478     /**
479      * Add several values to elements in the specified row of the matrix, at
480      * the column positions given by <tt>col_indices</tt>.
481      *
482      * If the present object (from a derived class of this one) happens to be
483      * a sparse matrix, then this function adds some new entries to the matrix
484      * if they didn't exist before, very much in contrast to the SparseMatrix
485      * class which throws an error if the entry does not exist.
486      *
487      * The optional parameter <tt>elide_zero_values</tt> can be used to
488      * specify whether zero values should be added anyway or these should be
489      * filtered away and only non-zero data is added. The default value is
490      * <tt>true</tt>, i.e., zero values won't be added into the matrix.
491      */
492     void
493     add(const size_type                 row,
494         const std::vector<size_type> &  col_indices,
495         const std::vector<PetscScalar> &values,
496         const bool                      elide_zero_values = true);
497 
498     /**
499      * Add an array of values given by <tt>values</tt> to the given global
500      * matrix row, at the columns specified by <tt>col_indices</tt>.
501      *
502      * If the present object (from a derived class of this one) happens to be
503      * a sparse matrix, then this function adds some new entries to the matrix
504      * if they didn't exist before, very much in contrast to the SparseMatrix
505      * class which throws an error if the entry does not exist.
506      *
507      * The optional parameter <tt>elide_zero_values</tt> can be used to
508      * specify whether zero values should be added anyway or these should be
509      * filtered away and only non-zero data is added. The default value is
510      * <tt>true</tt>, i.e., zero values won't be added into the matrix.
511      */
512     void
513     add(const size_type    row,
514         const size_type    n_cols,
515         const size_type *  col_indices,
516         const PetscScalar *values,
517         const bool         elide_zero_values      = true,
518         const bool         col_indices_are_sorted = false);
519 
520     /**
521      * Remove all elements from this <tt>row</tt> by setting them to zero. The
522      * function does not modify the number of allocated nonzero entries, it
523      * only sets some entries to zero. It may drop them from the sparsity
524      * pattern, though (but retains the allocated memory in case new entries
525      * are again added later).
526      *
527      * This operation is used in eliminating constraints (e.g. due to hanging
528      * nodes) and makes sure that we can write this modification to the matrix
529      * without having to read entries (such as the locations of non-zero
530      * elements) from it -- without this operation, removing constraints on
531      * parallel matrices is a rather complicated procedure.
532      *
533      * The second parameter can be used to set the diagonal entry of this row
534      * to a value different from zero. The default is to set it to zero.
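     *
     * As an illustrative sketch (with <tt>constrained_rows</tt> denoting a
     * set of row indices to be eliminated, for example because of boundary
     * values, and <tt>matrix</tt> an object of a derived class), one could
     * zero these rows while keeping a unit entry on the diagonal:
     * @code
     * std::vector<types::global_dof_index> constrained_rows;
     * // ... collect the row indices that are to be eliminated ...
     *
     * for (const auto row : constrained_rows)
     *   matrix.clear_row(row, 1.0);
     * @endcode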
535      */
536     void
537     clear_row(const size_type row, const PetscScalar new_diag_value = 0);
538 
539     /**
540      * Same as clear_row(), except that it works on a number of rows at once.
541      *
542      * The second parameter can be used to set the diagonal entries of all
543      * cleared rows to something different from zero. Note that all of these
544      * diagonal entries get the same value -- if you want different values for
545      * the diagonal entries, you have to set them by hand.
546      */
547     void
548     clear_rows(const std::vector<size_type> &rows,
549                const PetscScalar             new_diag_value = 0);
550 
551     /**
552      * PETSc matrices store their own sparsity patterns. So, in analogy to our
553      * own SparsityPattern class, this function compresses the sparsity
554      * pattern and allows the resulting matrix to be used in all other
555      * operations where before only assembly functions were allowed. This
556      * function must therefore be called once you have assembled the matrix.
557      *
558      * See
559      * @ref GlossCompress "Compressing distributed objects"
560      * for more information.
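     *
     * Note also that insertions (via set()) and additions (via add()) may
     * not be mixed without an intermediate compress() call. A sketch of the
     * required pattern (assuming <tt>matrix</tt> is an already initialized
     * matrix of a derived class):
     * @code
     * matrix.set(0, 0, 1.0);
     * matrix.compress(VectorOperation::insert); // finish all 'set' calls
     *
     * matrix.add(0, 0, 0.5);
     * matrix.compress(VectorOperation::add);    // finish all 'add' calls
     * @endcode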
561      */
562     void
563     compress(const VectorOperation::values operation);
564 
565     /**
566      * Return the value of the entry (<i>i,j</i>).  This may be an expensive
567      * operation and you should always take care where to call this function.
568      * In contrast to the respective function in deal.II's own SparseMatrix
569      * class, we don't throw an exception if the respective entry doesn't exist in the
570      * sparsity pattern of this class, since PETSc does not transmit this
571      * information.
572      *
573      * This function is therefore exactly equivalent to the <tt>el()</tt>
574      * function.
575      */
576     PetscScalar
577     operator()(const size_type i, const size_type j) const;
578 
579     /**
580      * Return the value of the matrix entry (<i>i,j</i>). If this entry does
581      * not exist in the sparsity pattern, then zero is returned. While this
582      * may be convenient in some cases, note that it is simple to write
583      * algorithms that are slow compared to an optimal solution, since the
584      * sparsity of the matrix is not used.
585      */
586     PetscScalar
587     el(const size_type i, const size_type j) const;
588 
589     /**
590      * Return the main diagonal element in the <i>i</i>th row. This function
591      * throws an error if the matrix is not quadratic.
592      *
593      * Since we do not have direct access to the underlying data structure,
594      * this function is no faster than the elementwise access using the el()
595      * function. However, we provide this function for compatibility with the
596      * SparseMatrix class.
597      */
598     PetscScalar
599     diag_element(const size_type i) const;
600 
601     /**
602      * Return the number of rows in this matrix.
603      */
604     size_type
605     m() const;
606 
607     /**
608      * Return the number of columns in this matrix.
609      */
610     size_type
611     n() const;
612 
613     /**
614      * Return the local dimension of the matrix, i.e. the number of rows
615      * stored on the present MPI process. For sequential matrices, this number
616      * is the same as m(), but for parallel matrices it may be smaller.
617      *
618      * To figure out which elements exactly are stored locally, use
619      * local_range().
620      */
621     size_type
622     local_size() const;
623 
624     /**
625      * Return a pair of indices indicating which rows of this matrix are
626      * stored locally. The first number is the index of the first row stored,
627      * the second the index of the one past the last one that is stored
628      * locally. If this is a sequential matrix, then the result will be the
629      * pair (0,m()), otherwise it will be a pair (i,i+n), where
630      * <tt>n=local_size()</tt>.
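     *
     * For example (assuming <tt>matrix</tt> is a parallel matrix of some
     * derived class), work can be restricted to the locally stored rows
     * like this:
     * @code
     * const auto range = matrix.local_range();
     *
     * // visit only the rows stored on the current MPI process
     * for (auto row = range.first; row < range.second; ++row)
     *   {
     *     // ... do something with this locally stored row ...
     *   }
     * @endcode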
631      */
632     std::pair<size_type, size_type>
633     local_range() const;
634 
635     /**
636      * Return whether @p index is in the local range or not, see also
637      * local_range().
638      */
639     bool
640     in_local_range(const size_type index) const;
641 
642     /**
643      * Return a reference to the MPI communicator object in use with this
644      * matrix. This function has to be implemented in derived classes.
645      */
646     virtual const MPI_Comm &
647     get_mpi_communicator() const = 0;
648 
649     /**
650      * Return the number of nonzero elements of this matrix. Actually, it
651      * returns the number of entries in the sparsity pattern; if any of the
652      * entries should happen to be zero, it is counted anyway.
653      */
654     size_type
655     n_nonzero_elements() const;
656 
657     /**
658      * Number of entries in a specific row.
659      */
660     size_type
661     row_length(const size_type row) const;
662 
663     /**
664      * Return the $l_1$-norm of the matrix, that is
665      * $|M|_1 = \max_j \sum_i |M_{ij}|$, the maximum over all columns $j$ of
666      * the sums of absolute values over all rows $i$. This is the natural
667      * matrix norm that is compatible with the $l_1$-norm for vectors, i.e.
668      * $|Mv|_1\leq |M|_1 |v|_1$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)
669      */
670     PetscReal
671     l1_norm() const;
672 
673     /**
674      * Return the $l_\infty$-norm of the matrix, that is
675      * $|M|_\infty = \max_i \sum_j |M_{ij}|$, the maximum over all rows $i$ of
676      * the sums of absolute values over all columns $j$. This is the natural
677      * matrix norm that is compatible with the $l_\infty$-norm of vectors, i.e.
678      * $|Mv|_\infty \leq |M|_\infty |v|_\infty$. (cf. Haemmerlin-Hoffmann: Numerische Mathematik)
679      */
680     PetscReal
681     linfty_norm() const;
682 
683     /**
684      * Return the Frobenius norm of the matrix, i.e. the square root of the
685      * sum of squares of all entries in the matrix.
686      */
687     PetscReal
688     frobenius_norm() const;
689 
690 
691     /**
692      * Return the square of the norm of the vector $v$ with respect to the
693      * norm induced by this matrix, i.e. $\left(v,Mv\right)$. This is useful,
694      * e.g. in the finite element context, where the $L_2$ norm of a function
695      * equals the matrix norm with respect to the mass matrix of the vector
696      * representing the nodal values of the finite element function.
697      *
698      * Obviously, the matrix needs to be quadratic for this operation.
699      *
700      * The implementation of this function is not as efficient as the one in
701      * deal.II's own SparseMatrix class (i.e. the original one, not the
702      * PETSc wrapper class) since PETSc doesn't support this operation and
703      * needs a temporary vector.
704      *
705      * Note that if the current object represents a parallel distributed
706      * matrix (of type PETScWrappers::MPI::SparseMatrix), then the given
707      * vector has to be a distributed vector as well. Conversely, if the
708      * matrix is not distributed, then neither may the vector be.
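     *
     * As a brief sketch (assuming a mass matrix <tt>mass_matrix</tt>, a
     * compatible vector of nodal values <tt>u</tt>, and a real-valued
     * PetscScalar), the $L_2$ norm of the corresponding finite element
     * function could then be computed as
     * @code
     * const double l2_norm = std::sqrt(mass_matrix.matrix_norm_square(u));
     * @endcode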
709      */
710     PetscScalar
711     matrix_norm_square(const VectorBase &v) const;
712 
713 
714     /**
715      * Compute the matrix scalar product $\left(u,Mv\right)$.
716      *
717      * The implementation of this function is not as efficient as the one in
718      * deal.II's own SparseMatrix class (i.e. the original one, not the
719      * PETSc wrapper class) since PETSc doesn't support this operation and
720      * needs a temporary vector.
721      *
722      * Note that if the current object represents a parallel distributed
723      * matrix (of type PETScWrappers::MPI::SparseMatrix), then both vectors
724      * have to be distributed vectors as well. Conversely, if the matrix is
725      * not distributed, then neither of the vectors may be.
726      */
727     PetscScalar
728     matrix_scalar_product(const VectorBase &u, const VectorBase &v) const;
729 
730     /**
731      * Return the trace of the matrix, i.e. the sum of all diagonal entries in
732      * the matrix.
733      */
734     PetscScalar
735     trace() const;
736 
737     /**
738      * Multiply the entire matrix by a fixed factor.
739      */
740     MatrixBase &
741     operator*=(const PetscScalar factor);
742 
743     /**
744      * Divide the entire matrix by a fixed factor.
745      */
746     MatrixBase &
747     operator/=(const PetscScalar factor);
748 
749 
750     /**
751      * Add the matrix @p other scaled by the factor @p factor to the current
752      * matrix.
753      */
754     MatrixBase &
755     add(const PetscScalar factor, const MatrixBase &other);
756 
757 
758     /**
759      * Add the matrix @p other scaled by the factor @p factor to the current
760      * matrix.
761      * @deprecated Use the function with order of arguments reversed instead.
762      */
763     DEAL_II_DEPRECATED
764     MatrixBase &
765     add(const MatrixBase &other, const PetscScalar factor);
766 
767     /**
768      * Matrix-vector multiplication: let <i>dst = M*src</i> with <i>M</i>
769      * being this matrix.
770      *
771      * Source and destination must not be the same vector.
772      *
773      * Note that if the current object represents a parallel distributed
774      * matrix (of type PETScWrappers::MPI::SparseMatrix), then both vectors
775      * have to be distributed vectors as well. Conversely, if the matrix is
776      * not distributed, then neither of the vectors may be.
777      */
778     void
779     vmult(VectorBase &dst, const VectorBase &src) const;
780 
781     /**
782      * Matrix-vector multiplication: let <i>dst = M<sup>T</sup>*src</i> with
783      * <i>M</i> being this matrix. This function does the same as vmult() but
784      * takes the transposed matrix.
785      *
786      * Source and destination must not be the same vector.
787      *
788      * Note that if the current object represents a parallel distributed
789      * matrix (of type PETScWrappers::MPI::SparseMatrix), then both vectors
790      * have to be distributed vectors as well. Conversely, if the matrix is
791      * not distributed, then neither of the vectors may be.
792      */
793     void
794     Tvmult(VectorBase &dst, const VectorBase &src) const;
795 
796     /**
797      * Adding Matrix-vector multiplication. Add <i>M*src</i> to <i>dst</i>
798      * with <i>M</i> being this matrix.
799      *
800      * Source and destination must not be the same vector.
801      *
802      * Note that if the current object represents a parallel distributed
803      * matrix (of type PETScWrappers::MPI::SparseMatrix), then both vectors
804      * have to be distributed vectors as well. Conversely, if the matrix is
805      * not distributed, then neither of the vectors may be.
806      */
807     void
808     vmult_add(VectorBase &dst, const VectorBase &src) const;
809 
810     /**
811      * Adding Matrix-vector multiplication. Add <i>M<sup>T</sup>*src</i> to
812      * <i>dst</i> with <i>M</i> being this matrix. This function does the same
813      * as vmult_add() but takes the transposed matrix.
814      *
815      * Source and destination must not be the same vector.
816      *
817      * Note that if the current object represents a parallel distributed
818      * matrix (of type PETScWrappers::MPI::SparseMatrix), then both vectors
819      * have to be distributed vectors as well. Conversely, if the matrix is
820      * not distributed, then neither of the vectors may be.
821      */
822     void
823     Tvmult_add(VectorBase &dst, const VectorBase &src) const;
824 
825     /**
826      * Compute the residual of an equation <i>Mx=b</i>, where the residual is
827      * defined to be <i>r=b-Mx</i>. Write the residual into @p dst. The
828      * <i>l<sub>2</sub></i> norm of the residual vector is returned.
829      *
830      * Source <i>x</i> and destination <i>dst</i> must not be the same vector.
831      *
832      * Note that if the current object represents a parallel distributed
833      * matrix (of type PETScWrappers::MPI::SparseMatrix), then all vectors
834      * have to be distributed vectors as well. Conversely, if the matrix is
835      * not distributed, then neither of the vectors may be.
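     *
     * A short sketch (assuming the vectors <tt>res</tt>, <tt>x</tt>, and
     * <tt>b</tt> have a layout compatible with <tt>matrix</tt>):
     * @code
     * // res = b - M*x; the return value is the l2 norm of res
     * const PetscScalar res_norm = matrix.residual(res, x, b);
     * @endcode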
836      */
837     PetscScalar
838     residual(VectorBase &dst, const VectorBase &x, const VectorBase &b) const;
839 
840     /**
841      * Iterator starting at the first entry. This can only be called on a
842      * processor owning the entire matrix. In all other cases refer to the
843      * version of begin() taking a row number as an argument.
844      */
845     const_iterator
846     begin() const;
847 
848     /**
849      * Final iterator. This can only be called on a processor owning the entire
850      * matrix. In all other cases refer to the version of end() taking a row
851      * number as an argument.
852      */
853     const_iterator
854     end() const;
855 
856     /**
857      * Iterator starting at the first entry of row @p r.
858      *
859      * Note that if the given row is empty, i.e. does not contain any nonzero
860      * entries, then the iterator returned by this function equals
861      * <tt>end(r)</tt>. Note also that the iterator may not be dereferenceable
862      * in that case.
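     *
     * For example (assuming <tt>matrix</tt> has been compressed and
     * <tt>row</tt> is a locally stored row), the entries of one row can be
     * visited like this:
     * @code
     * PetscScalar row_sum = 0;
     * for (auto entry = matrix.begin(row); entry != matrix.end(row); ++entry)
     *   row_sum += entry->value();
     * @endcode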
863      */
864     const_iterator
865     begin(const size_type r) const;
866 
867     /**
868      * Final iterator of row <tt>r</tt>. It points to the first element past
869      * the end of line @p r, or past the end of the entire sparsity pattern.
870      *
871      * Note that the end iterator is not necessarily dereferenceable. This is
872      * in particular the case if it is the end iterator for the last row of a
873      * matrix.
874      */
875     const_iterator
876     end(const size_type r) const;
877 
878     /**
879      * Conversion operator to gain access to the underlying PETSc type. If you
880      * do this, you cut this class off from some information it may need, so
881      * this conversion operator should only be used if you know what you are doing. In
882      * particular, it should only be used for read-only operations into the
883      * matrix.
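     *
     * As an illustration (a read-only sketch in which <tt>matrix</tt>
     * denotes an object of a derived class), the conversion allows passing
     * the wrapper directly to plain PETSc routines, for example to query
     * the global size:
     * @code
     * PetscInt n_rows, n_cols;
     * const PetscErrorCode ierr = MatGetSize(matrix, &n_rows, &n_cols);
     * AssertThrow(ierr == 0, ExcPETScError(ierr));
     * @endcode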
884      */
885     operator Mat() const;
886 
887     /**
888      * Return a reference to the underlying PETSc type. It can be used to
889      * modify the underlying data, so use it only when you know what you
890      * are doing.
891      */
892     Mat &
893     petsc_matrix();
894 
895     /**
896      * Make an in-place transpose of a matrix.
897      */
898     void
899     transpose();
900 
901     /**
902      * Test whether a matrix is symmetric. The default tolerance is
903      * $1000$ times the 32-bit machine precision.
904      */
905     PetscBool
906     is_symmetric(const double tolerance = 1.e-12);
907 
908     /**
909      * Test whether a matrix is Hermitian, i.e. it is the complex conjugate of
910      * its transpose. The default tolerance is $1000$ times the 32-bit machine
911      * precision.
912      */
913     PetscBool
914     is_hermitian(const double tolerance = 1.e-12);
915 
916     /**
917      * Print the PETSc matrix object values using PETSc internal matrix viewer
918      * function <tt>MatView</tt>. The default format prints the nonzero
919      * matrix elements. For other valid view formats, consult
920      * http://www.mcs.anl.gov/petsc/petsc-current/docs/manualpages/Mat/MatView.html
921      */
922     void
923     write_ascii(const PetscViewerFormat format = PETSC_VIEWER_DEFAULT);
924 
925     /**
926      * Print the elements of a matrix to the given output stream.
927      *
928      * @param[in,out] out The output stream to which to write.
929      * @param[in] alternative_output This argument is ignored. It exists for
930      * compatibility with similar functions in other matrix classes.
931      */
932     void
933     print(std::ostream &out, const bool alternative_output = false) const;
934 
935     /**
936      * Return the number of bytes consumed by this matrix on this CPU.
937      */
938     std::size_t
939     memory_consumption() const;
940 
941     /**
942      * Exception
943      */
944     DeclExceptionMsg(ExcSourceEqualsDestination,
945                      "You are attempting an operation on two matrices that "
946                      "are the same object, but the operation requires that the "
947                      "two objects are in fact different.");
948 
949     /**
950      * Exception.
951      */
952     DeclException2(ExcWrongMode,
953                    int,
954                    int,
955                    << "You tried to do a "
956                    << (arg1 == 1 ? "'set'" : (arg1 == 2 ? "'add'" : "???"))
957                    << " operation but the matrix is currently in "
958                    << (arg2 == 1 ? "'set'" : (arg2 == 2 ? "'add'" : "???"))
959                    << " mode. You first have to call 'compress()'.");
960 
961   protected:
962     /**
963      * A generic matrix object in PETSc. The actual type, a sparse matrix, is
964      * set in the constructor.
965      */
966     Mat matrix;
967 
968     /**
969      * Store whether the last action was a write or add operation.
970      */
971     VectorOperation::values last_action;
972 
973     /**
974      * Ensure that the add/set mode that is required for actions following
975      * this call is compatible with the current mode. Should be called from
976      * all internal functions accessing matrix elements.
977      */
978     void
979     prepare_action(const VectorOperation::values new_action);
980 
981     /**
982      * Internal function that checks that there are no pending insert/add
983      * operations. Throws an exception otherwise. Useful before calling any
984      * PETSc internal functions modifying the matrix.
985      */
986     void
987     assert_is_compressed();
988 
989     /**
990      * For some matrix storage formats, in particular for the PETSc
991      * distributed block matrices, set and add operations on individual
992      * elements cannot be freely mixed. Rather, one has to synchronize
993      * operations when one wants to switch from setting elements to adding to
994      * elements. BlockMatrixBase automatically synchronizes the access by
995      * calling this helper function for each block. This function ensures that
996      * the matrix is in a state that allows adding elements; if it previously
997      * already was in this state, the function does nothing.
998      */
999     void
1000     prepare_add();
1001     /**
1002      * Same as prepare_add() but prepare the matrix for setting elements if
1003      * the representation of elements in this class requires such an
1004      * operation.
1005      */
1006     void
1007     prepare_set();
1008 
1009     /**
1010      * Base function to perform the matrix-matrix multiplication $C = AB$,
1011      * or, if a vector $V$ whose size is compatible with $B$ is given,
1012      * $C = A \text{diag}(V) B$, where $\text{diag}(V)$ defines a
1013      * diagonal matrix with the vector entries.
1014      *
1015      * This function assumes that the calling matrix $A$ and $B$
1016      * have compatible sizes. The size of $C$ will be set within this
1017      * function.
1018      *
1019      * The content as well as the sparsity pattern of the matrix $C$ will be
1020      * reset by this function, so make sure that the sparsity pattern is not
1021      * used somewhere else in your program. This is an expensive operation, so
1022      * think twice before you use this function.
1023      */
1024     void
1025     mmult(MatrixBase &C, const MatrixBase &B, const VectorBase &V) const;
1026 
1027     /**
1028      * Base function to perform the matrix-matrix multiplication with
1029      * the transpose of <tt>this</tt>, i.e., $C = A^T B$, or,
1030      * if an optional vector $V$ whose size is compatible with $B$ is given,
1031      * $C = A^T \text{diag}(V) B$, where $\text{diag}(V)$ defines a
1032      * diagonal matrix with the vector entries.
1033      *
1034      * This function assumes that the calling matrix $A$ and $B$
1035      * have compatible sizes. The size of $C$ will be set within this
1036      * function.
1037      *
1038      * The content as well as the sparsity pattern of the matrix $C$ will be
1039      * changed by this function, so make sure that the sparsity pattern is not
1040      * used somewhere else in your program. This is an expensive operation, so
1041      * think twice before you use this function.
1042      */
1043     void
1044     Tmmult(MatrixBase &C, const MatrixBase &B, const VectorBase &V) const;
1045 
1046   private:
1047     /**
1048      * An internal array of integer values that is used to store the column
1049      * indices when adding/inserting local data into the (large) sparse
1050      * matrix.
1051      *
1052      * This variable does not store any "state" of the matrix
1053      * object. Rather, it is only used as a temporary buffer by some
1054      * of the member functions of this class. As with all @p mutable
1055      * member variables, the use of this variable is not thread-safe
1056      * unless guarded by a mutex. However, since PETSc matrix
1057      * operations are not thread-safe anyway, there is no need to
1058      * attempt to make things thread-safe, and so there is no mutex
1059      * associated with this variable.
1060      */
1061     mutable std::vector<PetscInt> column_indices;
1062 
1063     /**
1064      * An internal array of scalar values that is used to store the matrix
1065      * values when adding/inserting local data into the (large) sparse
1066      * matrix.
1067      *
1068      * The same comment as for the @p column_indices variable above
1069      * applies.
1070      */
1071     mutable std::vector<PetscScalar> column_values;
1072 
1073 
1074     // To allow calling protected prepare_add() and prepare_set().
1075     template <class>
1076     friend class dealii::BlockMatrixBase;
1077   };
1078 
1079 
1080 
1081 #    ifndef DOXYGEN
1082   // ---------------------- inline and template functions ---------------------
1083 
1084 
1085   namespace MatrixIterators
1086   {
1087     inline const_iterator::Accessor::Accessor(const MatrixBase *matrix,
1088                                               const size_type   row,
1089                                               const size_type   index)
1090       : matrix(const_cast<MatrixBase *>(matrix))
1091       , a_row(row)
1092       , a_index(index)
1093     {
1094       visit_present_row();
1095     }
1096 
1097 
1098 
1099     inline const_iterator::Accessor::size_type
1100     const_iterator::Accessor::row() const
1101     {
1102       Assert(a_row < matrix->m(), ExcBeyondEndOfMatrix());
1103       return a_row;
1104     }
1105 
1106 
1107     inline const_iterator::Accessor::size_type
1108     const_iterator::Accessor::column() const
1109     {
1110       Assert(a_row < matrix->m(), ExcBeyondEndOfMatrix());
1111       return (*colnum_cache)[a_index];
1112     }
1113 
1114 
1115     inline const_iterator::Accessor::size_type
1116     const_iterator::Accessor::index() const
1117     {
1118       Assert(a_row < matrix->m(), ExcBeyondEndOfMatrix());
1119       return a_index;
1120     }
1121 
1122 
1123     inline PetscScalar
1124     const_iterator::Accessor::value() const
1125     {
1126       Assert(a_row < matrix->m(), ExcBeyondEndOfMatrix());
1127       return (*value_cache)[a_index];
1128     }
1129 
1130 
1131     inline const_iterator::const_iterator(const MatrixBase *matrix,
1132                                           const size_type   row,
1133                                           const size_type   index)
1134       : accessor(matrix, row, index)
1135     {}
1136 
1137 
1138 
1139     inline const_iterator &
1140     const_iterator::operator++()
1141     {
1142       Assert(accessor.a_row < accessor.matrix->m(), ExcIteratorPastEnd());
1143 
1144       ++accessor.a_index;
1145 
1146       // if at end of line: do one step, then cycle until we find a
1147       // row with a nonzero number of entries
1148       if (accessor.a_index >= accessor.colnum_cache->size())
1149         {
1150           accessor.a_index = 0;
1151           ++accessor.a_row;
1152 
1153           while ((accessor.a_row < accessor.matrix->m()) &&
1154                  (accessor.a_row < accessor.matrix->local_range().second) &&
1155                  (accessor.matrix->row_length(accessor.a_row) == 0))
1156             ++accessor.a_row;
1157 
1158           accessor.visit_present_row();
1159         }
1160       return *this;
1161     }
1162 
1163 
1164     inline const_iterator
1165     const_iterator::operator++(int)
1166     {
1167       const const_iterator old_state = *this;
1168       ++(*this);
1169       return old_state;
1170     }
1171 
1172 
1173     inline const const_iterator::Accessor &const_iterator::operator*() const
1174     {
1175       return accessor;
1176     }
1177 
1178 
1179     inline const const_iterator::Accessor *const_iterator::operator->() const
1180     {
1181       return &accessor;
1182     }
1183 
1184 
1185     inline bool
1186     const_iterator::operator==(const const_iterator &other) const
1187     {
1188       return (accessor.a_row == other.accessor.a_row &&
1189               accessor.a_index == other.accessor.a_index);
1190     }
1191 
1192 
1193     inline bool
1194     const_iterator::operator!=(const const_iterator &other) const
1195     {
1196       return !(*this == other);
1197     }
1198 
1199 
1200     inline bool
1201     const_iterator::operator<(const const_iterator &other) const
1202     {
1203       return (accessor.row() < other.accessor.row() ||
1204               (accessor.row() == other.accessor.row() &&
1205                accessor.index() < other.accessor.index()));
1206     }
1207 
1208   } // namespace MatrixIterators
1209 
1210 
1211 
1212   // Inline the set() and add()
1213   // functions, since they will be
1214   // called frequently, and the
1215   // compiler can optimize away
1216   // some unnecessary loops when
1217   // the sizes are given at
1218   // compile time.
1219   inline void
1220   MatrixBase::set(const size_type i, const size_type j, const PetscScalar value)
1221   {
1222     AssertIsFinite(value);
1223 
1224     set(i, 1, &j, &value, false);
1225   }
1226 
1227 
1228 
1229   inline void
1230   MatrixBase::set(const std::vector<size_type> & indices,
1231                   const FullMatrix<PetscScalar> &values,
1232                   const bool                     elide_zero_values)
1233   {
1234     Assert(indices.size() == values.m(),
1235            ExcDimensionMismatch(indices.size(), values.m()));
1236     Assert(values.m() == values.n(), ExcNotQuadratic());
1237 
1238     for (size_type i = 0; i < indices.size(); ++i)
1239       set(indices[i],
1240           indices.size(),
1241           indices.data(),
1242           &values(i, 0),
1243           elide_zero_values);
1244   }
1245 
1246 
1247 
1248   inline void
1249   MatrixBase::set(const std::vector<size_type> & row_indices,
1250                   const std::vector<size_type> & col_indices,
1251                   const FullMatrix<PetscScalar> &values,
1252                   const bool                     elide_zero_values)
1253   {
1254     Assert(row_indices.size() == values.m(),
1255            ExcDimensionMismatch(row_indices.size(), values.m()));
1256     Assert(col_indices.size() == values.n(),
1257            ExcDimensionMismatch(col_indices.size(), values.n()));
1258 
1259     for (size_type i = 0; i < row_indices.size(); ++i)
1260       set(row_indices[i],
1261           col_indices.size(),
1262           col_indices.data(),
1263           &values(i, 0),
1264           elide_zero_values);
1265   }
1266 
1267 
1268 
1269   inline void
1270   MatrixBase::set(const size_type                 row,
1271                   const std::vector<size_type> &  col_indices,
1272                   const std::vector<PetscScalar> &values,
1273                   const bool                      elide_zero_values)
1274   {
1275     Assert(col_indices.size() == values.size(),
1276            ExcDimensionMismatch(col_indices.size(), values.size()));
1277 
1278     set(row,
1279         col_indices.size(),
1280         col_indices.data(),
1281         values.data(),
1282         elide_zero_values);
1283   }
1284 
1285 
1286 
1287   inline void
1288   MatrixBase::set(const size_type    row,
1289                   const size_type    n_cols,
1290                   const size_type *  col_indices,
1291                   const PetscScalar *values,
1292                   const bool         elide_zero_values)
1293   {
1294     prepare_action(VectorOperation::insert);
1295 
1296     const PetscInt  petsc_i = row;
1297     PetscInt const *col_index_ptr;
1298 
1299     PetscScalar const *col_value_ptr;
1300     int                n_columns;
1301 
1302     // If we don't elide zeros, the pointers are already available...
1303     if (elide_zero_values == false)
1304       {
1305         col_index_ptr = reinterpret_cast<const PetscInt *>(col_indices);
1306         col_value_ptr = values;
1307         n_columns     = n_cols;
1308       }
1309     else
1310       {
1311         // Otherwise, extract nonzero values in each row and get the
1312         // respective index.
1313         if (column_indices.size() < n_cols)
1314           {
1315             column_indices.resize(n_cols);
1316             column_values.resize(n_cols);
1317           }
1318 
1319         n_columns = 0;
1320         for (size_type j = 0; j < n_cols; ++j)
1321           {
1322             const PetscScalar value = values[j];
1323             AssertIsFinite(value);
1324             if (value != PetscScalar())
1325               {
1326                 column_indices[n_columns] = col_indices[j];
1327                 column_values[n_columns]  = value;
1328                 n_columns++;
1329               }
1330           }
1331         AssertIndexRange(n_columns, n_cols + 1);
1332 
1333         col_index_ptr = column_indices.data();
1334         col_value_ptr = column_values.data();
1335       }
1336 
1337     const PetscErrorCode ierr = MatSetValues(matrix,
1338                                              1,
1339                                              &petsc_i,
1340                                              n_columns,
1341                                              col_index_ptr,
1342                                              col_value_ptr,
1343                                              INSERT_VALUES);
1344     AssertThrow(ierr == 0, ExcPETScError(ierr));
1345   }
1346 
1347 
1348 
1349   inline void
1350   MatrixBase::add(const size_type i, const size_type j, const PetscScalar value)
1351   {
1352     AssertIsFinite(value);
1353 
1354     if (value == PetscScalar())
1355       {
1356         // we have to check after using Insert/Add in any case to be
1357         // consistent with the MPI communication model, but we can save
1358         // some work if the addend is zero. If the value is nonzero, the
1359         // overload called below performs these actions itself.
1360         prepare_action(VectorOperation::add);
1361 
1362         return;
1363       }
1364     else
1365       add(i, 1, &j, &value, false);
1366   }
1367 
1368 
1369 
1370   inline void
1371   MatrixBase::add(const std::vector<size_type> & indices,
1372                   const FullMatrix<PetscScalar> &values,
1373                   const bool                     elide_zero_values)
1374   {
1375     Assert(indices.size() == values.m(),
1376            ExcDimensionMismatch(indices.size(), values.m()));
1377     Assert(values.m() == values.n(), ExcNotQuadratic());
1378 
1379     for (size_type i = 0; i < indices.size(); ++i)
1380       add(indices[i],
1381           indices.size(),
1382           indices.data(),
1383           &values(i, 0),
1384           elide_zero_values);
1385   }
1386 
1387 
1388 
1389   inline void
1390   MatrixBase::add(const std::vector<size_type> & row_indices,
1391                   const std::vector<size_type> & col_indices,
1392                   const FullMatrix<PetscScalar> &values,
1393                   const bool                     elide_zero_values)
1394   {
1395     Assert(row_indices.size() == values.m(),
1396            ExcDimensionMismatch(row_indices.size(), values.m()));
1397     Assert(col_indices.size() == values.n(),
1398            ExcDimensionMismatch(col_indices.size(), values.n()));
1399 
1400     for (size_type i = 0; i < row_indices.size(); ++i)
1401       add(row_indices[i],
1402           col_indices.size(),
1403           col_indices.data(),
1404           &values(i, 0),
1405           elide_zero_values);
1406   }
1407 
1408 
1409 
1410   inline void
1411   MatrixBase::add(const size_type                 row,
1412                   const std::vector<size_type> &  col_indices,
1413                   const std::vector<PetscScalar> &values,
1414                   const bool                      elide_zero_values)
1415   {
1416     Assert(col_indices.size() == values.size(),
1417            ExcDimensionMismatch(col_indices.size(), values.size()));
1418 
1419     add(row,
1420         col_indices.size(),
1421         col_indices.data(),
1422         values.data(),
1423         elide_zero_values);
1424   }
1425 
1426 
1427 
1428   inline void
1429   MatrixBase::add(const size_type    row,
1430                   const size_type    n_cols,
1431                   const size_type *  col_indices,
1432                   const PetscScalar *values,
1433                   const bool         elide_zero_values,
1434                   const bool /*col_indices_are_sorted*/)
1435   {
1436     (void)elide_zero_values;
1437 
1438     prepare_action(VectorOperation::add);
1439 
1440     const PetscInt  petsc_i = row;
1441     PetscInt const *col_index_ptr;
1442 
1443     PetscScalar const *col_value_ptr;
1444     int                n_columns;
1445 
1446     // If we don't elide zeros, the pointers are already available...
1447     if (elide_zero_values == false)
1448       {
1449         col_index_ptr = reinterpret_cast<const PetscInt *>(col_indices);
1450         col_value_ptr = values;
1451         n_columns     = n_cols;
1452       }
1453     else
1454       {
1455         // Otherwise, extract nonzero values in each row and get the
1456         // respective index.
1457         if (column_indices.size() < n_cols)
1458           {
1459             column_indices.resize(n_cols);
1460             column_values.resize(n_cols);
1461           }
1462 
1463         n_columns = 0;
1464         for (size_type j = 0; j < n_cols; ++j)
1465           {
1466             const PetscScalar value = values[j];
1467             AssertIsFinite(value);
1468             if (value != PetscScalar())
1469               {
1470                 column_indices[n_columns] = col_indices[j];
1471                 column_values[n_columns]  = value;
1472                 n_columns++;
1473               }
1474           }
1475         AssertIndexRange(n_columns, n_cols + 1);
1476 
1477         col_index_ptr = column_indices.data();
1478         col_value_ptr = column_values.data();
1479       }
1480 
1481     const PetscErrorCode ierr = MatSetValues(
1482       matrix, 1, &petsc_i, n_columns, col_index_ptr, col_value_ptr, ADD_VALUES);
1483     AssertThrow(ierr == 0, ExcPETScError(ierr));
1484   }
1485 
1486 
1487 
1488   inline PetscScalar
1489   MatrixBase::operator()(const size_type i, const size_type j) const
1490   {
1491     return el(i, j);
1492   }
1493 
1494 
1495 
1496   inline MatrixBase::const_iterator
1497   MatrixBase::begin() const
1498   {
1499     Assert(
1500       (in_local_range(0) && in_local_range(m() - 1)),
1501       ExcMessage(
1502         "begin() and end() can only be called on a processor owning the entire matrix. If this is a distributed matrix, use begin(row) and end(row) instead."));
1503 
1504     // find the first non-empty row in order to make sure that the returned
1505     // iterator points to something useful
1506     size_type first_nonempty_row = 0;
1507     while ((first_nonempty_row < m()) && (row_length(first_nonempty_row) == 0))
1508       ++first_nonempty_row;
1509 
1510     return const_iterator(this, first_nonempty_row, 0);
1511   }
1512 
1513 
1514   inline MatrixBase::const_iterator
1515   MatrixBase::end() const
1516   {
1517     Assert(
1518       (in_local_range(0) && in_local_range(m() - 1)),
1519       ExcMessage(
1520         "begin() and end() can only be called on a processor owning the entire matrix. If this is a distributed matrix, use begin(row) and end(row) instead."));
1521 
1522     return const_iterator(this, m(), 0);
1523   }
1524 
1525 
1526   inline MatrixBase::const_iterator
1527   MatrixBase::begin(const size_type r) const
1528   {
1529     Assert(in_local_range(r),
1530            ExcIndexRange(r, local_range().first, local_range().second));
1531 
1532     if (row_length(r) > 0)
1533       return const_iterator(this, r, 0);
1534     else
1535       return end(r);
1536   }
1537 
1538 
1539   inline MatrixBase::const_iterator
1540   MatrixBase::end(const size_type r) const
1541   {
1542     Assert(in_local_range(r),
1543            ExcIndexRange(r, local_range().first, local_range().second));
1544 
1545     // place the iterator on the first entry past this line, or at the
1546     // end of the matrix
1547     //
1548     // in the parallel case, we need to put it on the first entry of
1549     // the first row after the locally owned range. this of course
1550     // doesn't exist, but we can nevertheless create such an
1551     // iterator. we need to check whether 'i' is past the locally
1552     // owned range of rows first, before we ask for the length of the
1553     // row since the latter query leads to an exception in case we ask
1554     // for a row that is not locally owned
1555     for (size_type i = r + 1; i < m(); ++i)
1556       if (i == local_range().second || (row_length(i) > 0))
1557         return const_iterator(this, i, 0);
1558 
1559     // if there is no such line, then take the
1560     // end iterator of the matrix
1561     // we don't allow calling end() directly for distributed matrices so we need
1562     // to copy the code without the assertion.
1563     return {this, m(), 0};
1564   }
1565 
1566 
1567 
1568   inline bool
1569   MatrixBase::in_local_range(const size_type index) const
1570   {
1571     PetscInt begin, end;
1572 
1573     const PetscErrorCode ierr =
1574       MatGetOwnershipRange(static_cast<const Mat &>(matrix), &begin, &end);
1575     AssertThrow(ierr == 0, ExcPETScError(ierr));
1576 
1577     return ((index >= static_cast<size_type>(begin)) &&
1578             (index < static_cast<size_type>(end)));
1579   }
1580 
1581 
1582 
1583   inline void
1584   MatrixBase::prepare_action(const VectorOperation::values new_action)
1585   {
1586     if (last_action == VectorOperation::unknown)
1587       last_action = new_action;
1588 
1589     Assert(last_action == new_action, ExcWrongMode(last_action, new_action));
1590   }
1591 
1592 
1593 
1594   inline void
1595   MatrixBase::assert_is_compressed()
1596   {
1597     // compress() sets the last action to none, which allows us to check if
1598     // there are pending add/insert operations:
1599     AssertThrow(last_action == VectorOperation::unknown,
1600                 ExcMessage("Error: missing compress() call."));
1601   }
1602 
1603 
1604 
1605   inline void
1606   MatrixBase::prepare_add()
1607   {
1608     prepare_action(VectorOperation::add);
1609   }
1610 
1611 
1612 
1613   inline void
1614   MatrixBase::prepare_set()
1615   {
1616     prepare_action(VectorOperation::insert);
1617   }
1618 
1619 #    endif // DOXYGEN
1620 } // namespace PETScWrappers
1621 
1622 
1623 DEAL_II_NAMESPACE_CLOSE
1624 
1625 
1626 #  endif // DEAL_II_WITH_PETSC
1627 
1628 #endif
1629 /*---------------------------- petsc_matrix_base.h --------------------------*/
1630