// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSEMATRIX_H
#define EIGEN_SPARSEMATRIX_H

namespace Eigen {

/** \ingroup SparseCore_Module
  *
  * \class SparseMatrix
  *
  * \brief A versatile sparse matrix representation
  *
  * This class implements a more versatile variant of the common \em compressed row/column storage format.
  * Each column's (resp. row's) non zeros are stored as pairs of value and associated row (resp. column) index.
  * All the non zeros are stored in a single large buffer. Unlike the \em compressed format, there might be extra
  * space in between the non zeros of two successive columns (resp. rows) such that insertion of a new non-zero
  * can be done with limited memory reallocation and copies.
  *
  * A call to the function makeCompressed() turns the matrix into the standard \em compressed format
  * compatible with many libraries.
  *
  * More details on this storage scheme are given in the \ref TutorialSparse "manual pages".
  *
  * \tparam _Scalar the scalar type, i.e. the type of the coefficients
  * \tparam _Options Union of bit flags controlling the storage scheme. Currently the only possibility
  *                 is ColMajor or RowMajor. The default is 0 which means column-major.
  * \tparam _StorageIndex the type of the indices. It has to be a \b signed type (e.g., short, int, std::ptrdiff_t). Default is \c int.
  *
  * \warning In %Eigen 3.2, the undocumented type \c SparseMatrix::Index was improperly defined as the storage index type (e.g., int),
  *          whereas it is now (starting from %Eigen 3.3) deprecated and always defined as Eigen::Index.
  *          Code making use of \c SparseMatrix::Index thus likely has to be changed to use \c SparseMatrix::StorageIndex instead.
  *
  * This class can be extended with the help of the plugin mechanism described on the page
  * \ref TopicCustomizing_Plugins by defining the preprocessor symbol \c EIGEN_SPARSEMATRIX_PLUGIN.
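  *
  * A minimal usage sketch (the sizes and fill pattern below are made up for illustration):
  * \code
  * SparseMatrix<double> mat(1000, 1000);       // default: column-major
  * mat.reserve(VectorXi::Constant(1000, 6));   // expect about 6 non zeros per column
  * mat.insert(3, 7) = 1.5;                     // element (3,7) must not already exist
  * mat.coeffRef(3, 7) += 0.5;                  // update an existing (or insert a new) element
  * mat.makeCompressed();                       // pack into the standard compressed format
  * \endcode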
  */

namespace internal {
template<typename _Scalar, int _Options, typename _StorageIndex>
struct traits<SparseMatrix<_Scalar, _Options, _StorageIndex> >
{
  typedef _Scalar Scalar;
  typedef _StorageIndex StorageIndex;
  typedef Sparse StorageKind;
  typedef MatrixXpr XprKind;
  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = Dynamic,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = Dynamic,
    Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,
    SupportedAccessPatterns = InnerRandomAccessPattern
  };
};

template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
{
  typedef SparseMatrix<_Scalar, _Options, _StorageIndex> MatrixType;
  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;

  typedef _Scalar Scalar;
  typedef Dense StorageKind;
  typedef _StorageIndex StorageIndex;
  typedef MatrixXpr XprKind;

  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = 1,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = 1,
    Flags = LvalueBit
  };
};

template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
 : public traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
{
  enum {
    Flags = 0
  };
};

} // end namespace internal

template<typename _Scalar, int _Options, typename _StorageIndex>
class SparseMatrix
  : public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _StorageIndex> >
{
    typedef SparseCompressedBase<SparseMatrix> Base;
    using Base::convert_index;
    friend class SparseVector<_Scalar,0,_StorageIndex>;
    template<typename, typename, typename, typename, typename>
    friend struct internal::Assignment;
  public:
    using Base::isCompressed;
    using Base::nonZeros;
    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
    using Base::operator+=;
    using Base::operator-=;

    typedef MappedSparseMatrix<Scalar,Flags> Map;
    typedef Diagonal<SparseMatrix> DiagonalReturnType;
    typedef Diagonal<const SparseMatrix> ConstDiagonalReturnType;
    typedef typename Base::InnerIterator InnerIterator;
    typedef typename Base::ReverseInnerIterator ReverseInnerIterator;


    using Base::IsRowMajor;
    typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
    enum {
      Options = _Options
    };

    typedef typename Base::IndexVector IndexVector;
    typedef typename Base::ScalarVector ScalarVector;
  protected:
    typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;

    Index m_outerSize;
    Index m_innerSize;
    StorageIndex* m_outerIndex;
    StorageIndex* m_innerNonZeros;     // optional, if null then the data is compressed
    Storage m_data;

  public:

    /** \returns the number of rows of the matrix */
    inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
    /** \returns the number of columns of the matrix */
    inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }

    /** \returns the number of rows (resp. columns) of the matrix if the storage order is column major (resp. row major) */
    inline Index innerSize() const { return m_innerSize; }
    /** \returns the number of columns (resp. rows) of the matrix if the storage order is column major (resp. row major) */
    inline Index outerSize() const { return m_outerSize; }

    /** \returns a const pointer to the array of values.
      * This function is aimed at interoperability with other libraries.
      * \sa innerIndexPtr(), outerIndexPtr() */
    inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
    /** \returns a non-const pointer to the array of values.
      * This function is aimed at interoperability with other libraries.
      * \sa innerIndexPtr(), outerIndexPtr() */
    inline Scalar* valuePtr() { return m_data.valuePtr(); }

    /** \returns a const pointer to the array of inner indices.
      * This function is aimed at interoperability with other libraries.
      * \sa valuePtr(), outerIndexPtr() */
    inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
    /** \returns a non-const pointer to the array of inner indices.
      * This function is aimed at interoperability with other libraries.
      * \sa valuePtr(), outerIndexPtr() */
    inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }

    /** \returns a const pointer to the array of the starting positions of the inner vectors.
      * This function is aimed at interoperability with other libraries.
      * \sa valuePtr(), innerIndexPtr() */
    inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
    /** \returns a non-const pointer to the array of the starting positions of the inner vectors.
      * This function is aimed at interoperability with other libraries.
      * \sa valuePtr(), innerIndexPtr() */
    inline StorageIndex* outerIndexPtr() { return m_outerIndex; }

    /** \returns a const pointer to the array of the number of non zeros of the inner vectors.
      * This function is aimed at interoperability with other libraries.
      * \warning it returns the null pointer 0 in compressed mode */
    inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
    /** \returns a non-const pointer to the array of the number of non zeros of the inner vectors.
      * This function is aimed at interoperability with other libraries.
      * \warning it returns the null pointer 0 in compressed mode */
    inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }

    /** \internal */
    inline Storage& data() { return m_data; }
    /** \internal */
    inline const Storage& data() const { return m_data; }

    /** \returns the value of the matrix at position \a row, \a col.
      * This function returns Scalar(0) if the element is not stored, as well as for stored explicit \em zeros. */
    inline Scalar coeff(Index row, Index col) const
    {
      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;
      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
      return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
    }

    /** \returns a non-const reference to the value of the matrix at position \a row, \a col
      *
      * If the element does not exist then it is inserted via the insert(Index,Index) function
      * which itself turns the matrix into a non compressed form if that was not the case.
      *
      * This is an O(log(nnz_j)) operation (binary search) plus the cost of the insert(Index,Index)
      * function if the element does not already exist.
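      *
      * For instance, coeffRef() is convenient for accumulating into possibly-existing entries
      * (a sketch; the indices are arbitrary):
      * \code
      * SparseMatrix<double> mat(10, 10);
      * mat.coeffRef(2, 3) += 1.0;   // inserts (2,3) on first use, updates it afterwards
      * \endcode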
      */
    inline Scalar& coeffRef(Index row, Index col)
    {
      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      Index start = m_outerIndex[outer];
      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
      eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
      if(end<=start)
        return insert(row,col);
      const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner));
      if((p<end) && (m_data.index(p)==inner))
        return m_data.value(p);
      else
        return insert(row,col);
    }

    /** \returns a reference to a new non zero coefficient with coordinates \a row x \a col.
      * The non zero coefficient must \b not already exist.
      *
      * If the matrix \c *this is in compressed mode, then \c *this is turned into uncompressed
      * mode while reserving room for 2 x this->innerSize() non zeros if reserve(Index) has not been called earlier.
      * In this case, the insertion procedure is optimized for a \e sequential insertion mode where elements are assumed to be
      * inserted by increasing outer-indices.
      *
      * If that's not the case, then it is strongly recommended to either use a triplet-list to assemble the matrix, or to first
      * call reserve(const SizesType &) to reserve the appropriate number of non-zero elements per inner vector.
      *
      * Assuming memory has been appropriately reserved, this function performs a sorted insertion in O(1)
      * if the elements of each inner vector are inserted in increasing inner index order, and in O(nnz_j) for a random insertion.
      *
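      * A typical assembly sketch (the per-column estimate of 6 non zeros is a made-up figure):
      * \code
      * SparseMatrix<double> mat(rows, cols);
      * mat.reserve(VectorXi::Constant(cols, 6));   // reserve room first to limit reallocations
      * // for each non zero (i, j, v_ij):
      * //   mat.insert(i, j) = v_ij;               // (i,j) must not already exist
      * mat.makeCompressed();                       // optional: compress the storage when done
      * \endcode
      *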
      */
    Scalar& insert(Index row, Index col);

  public:

    /** Removes all non zeros but keeps the allocated memory.
      *
      * This function does not free the currently allocated memory. To release as much memory as possible,
      * call \code mat.data().squeeze(); \endcode after resizing it.
      *
      * \sa resize(Index,Index), data()
      */
    inline void setZero()
    {
      m_data.clear();
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
      if(m_innerNonZeros)
        memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
    }

    /** Preallocates \a reserveSize non zeros.
      *
      * Precondition: the matrix must be in compressed mode. */
    inline void reserve(Index reserveSize)
    {
      eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
      m_data.reserve(reserveSize);
    }

    #ifdef EIGEN_PARSED_BY_DOXYGEN
    /** Preallocates \a reserveSize[\c j] non zeros for each column (resp. row) \c j.
      *
      * This function turns the matrix into non-compressed mode.
      *
      * The type \c SizesType must expose the following interface:
        \code
        typedef value_type;
        const value_type& operator[](i) const;
        \endcode
      * for \c i in the [0,this->outerSize()[ range.
      * Typical choices include std::vector<int>, Eigen::VectorXi, Eigen::VectorXi::Constant, etc.
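      *
      * For instance, to reserve a different amount of room per column (a sketch with arbitrary sizes):
      * \code
      * SparseMatrix<double> mat(100, 3);
      * Eigen::VectorXi sizes(3);
      * sizes << 4, 10, 2;   // expected number of non zeros per column
      * mat.reserve(sizes);
      * \endcode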
      */
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes);
    #else
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
    #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename
        typename
    #endif
        SizesType::value_type())
    {
      EIGEN_UNUSED_VARIABLE(enableif);
      reserveInnerVectors(reserveSizes);
    }
    #endif // EIGEN_PARSED_BY_DOXYGEN
  protected:
    template<class SizesType>
    inline void reserveInnerVectors(const SizesType& reserveSizes)
    {
      if(isCompressed())
      {
        Index totalReserveSize = 0;
        // turn the matrix into non-compressed mode
        m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
        if (!m_innerNonZeros) internal::throw_std_bad_alloc();

        // temporarily use m_innerNonZeros to hold the new starting points.
        StorageIndex* newOuterIndex = m_innerNonZeros;

        StorageIndex count = 0;
        for(Index j=0; j<m_outerSize; ++j)
        {
          newOuterIndex[j] = count;
          count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
          totalReserveSize += reserveSizes[j];
        }
        m_data.reserve(totalReserveSize);
        StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
        for(Index j=m_outerSize-1; j>=0; --j)
        {
          StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
          for(Index i=innerNNZ-1; i>=0; --i)
          {
            m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
            m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
          }
          previousOuterIndex = m_outerIndex[j];
          m_outerIndex[j] = newOuterIndex[j];
          m_innerNonZeros[j] = innerNNZ;
        }
        if(m_outerSize>0)
          m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];

        m_data.resize(m_outerIndex[m_outerSize]);
      }
      else
      {
        StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
        if (!newOuterIndex) internal::throw_std_bad_alloc();

        StorageIndex count = 0;
        for(Index j=0; j<m_outerSize; ++j)
        {
          newOuterIndex[j] = count;
          StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
          StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
          count += toReserve + m_innerNonZeros[j];
        }
        newOuterIndex[m_outerSize] = count;

        m_data.resize(count);
        for(Index j=m_outerSize-1; j>=0; --j)
        {
          Index offset = newOuterIndex[j] - m_outerIndex[j];
          if(offset>0)
          {
            StorageIndex innerNNZ = m_innerNonZeros[j];
            for(Index i=innerNNZ-1; i>=0; --i)
            {
              m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
              m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
            }
          }
        }

        std::swap(m_outerIndex, newOuterIndex);
        std::free(newOuterIndex);
      }

    }
  public:

    //--- low level purely coherent filling ---

    /** \internal
      * \returns a reference to the non zero coefficient at position \a row, \a col assuming that:
      * - the nonzero does not already exist
      * - the new coefficient is the last one according to the storage order
      *
      * Before filling a given inner vector you must call the startVec(Index) function.
      *
      * After an insertion session, you should call the finalize() function.
      *
      * \sa insert, insertBackByOuterInner, startVec */
    inline Scalar& insertBack(Index row, Index col)
    {
      return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
    }

    /** \internal
      * \sa insertBack, startVec */
    inline Scalar& insertBackByOuterInner(Index outer, Index inner)
    {
      eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
      eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(Scalar(0), inner);
      return m_data.value(p);
    }

    /** \internal
      * \warning use it only if you know what you are doing */
    inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
    {
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(Scalar(0), inner);
      return m_data.value(p);
    }

    /** \internal
      * \sa insertBack, insertBackByOuterInner */
    inline void startVec(Index outer)
    {
      eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
      eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
      m_outerIndex[outer+1] = m_outerIndex[outer];
    }

    /** \internal
      * Must be called after inserting a set of non zero entries using the low level compressed API.
      */
    inline void finalize()
    {
      if(isCompressed())
      {
        StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
        Index i = m_outerSize;
        // find the last filled column
        while (i>=0 && m_outerIndex[i]==0)
          --i;
        ++i;
        while (i<=m_outerSize)
        {
          m_outerIndex[i] = size;
          ++i;
        }
      }
    }

    //---

    template<typename InputIterators>
    void setFromTriplets(const InputIterators& begin, const InputIterators& end);

    template<typename InputIterators,typename DupFunctor>
    void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);

    void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar,Scalar>()); }

    template<typename DupFunctor>
    void collapseDuplicates(DupFunctor dup_func = DupFunctor());

    //---

    /** \internal
      * same as insert(Index,Index) except that the indices are given relative to the storage order */
    Scalar& insertByOuterInner(Index j, Index i)
    {
      return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
    }

    /** Turns the matrix into the \em compressed format.
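      *
      * After compression, the matrix can be exported to any library expecting the standard
      * compressed column (resp. row) storage arrays. A minimal interop sketch, assuming the
      * default \c int storage index:
      * \code
      * mat.makeCompressed();
      * const double* values = mat.valuePtr();      // nonZeros() values
      * const int*    inner  = mat.innerIndexPtr(); // nonZeros() inner indices
      * const int*    outer  = mat.outerIndexPtr(); // outerSize()+1 start positions
      * \endcode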
      */
    void makeCompressed()
    {
      if(isCompressed())
        return;

      eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);

      Index oldStart = m_outerIndex[1];
      m_outerIndex[1] = m_innerNonZeros[0];
      for(Index j=1; j<m_outerSize; ++j)
      {
        Index nextOldStart = m_outerIndex[j+1];
        Index offset = oldStart - m_outerIndex[j];
        if(offset>0)
        {
          for(Index k=0; k<m_innerNonZeros[j]; ++k)
          {
            m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
            m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
          }
        }
        m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
        oldStart = nextOldStart;
      }
      std::free(m_innerNonZeros);
      m_innerNonZeros = 0;
      m_data.resize(m_outerIndex[m_outerSize]);
      m_data.squeeze();
    }

    /** Turns the matrix into uncompressed mode */
    void uncompress()
    {
      if(m_innerNonZeros != 0)
        return;
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      if (!m_innerNonZeros) internal::throw_std_bad_alloc();
      for (Index i = 0; i < m_outerSize; i++)
      {
        m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
      }
    }

    /** Suppresses all nonzeros which are \b much \b smaller \b than \a reference under the tolerance \a epsilon */
    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
    {
      prune(default_prunning_func(reference,epsilon));
    }

    /** Turns the matrix into compressed format, and suppresses all nonzeros which do not satisfy the predicate \a keep.
      * The functor type \a KeepFunc must implement the following function:
      * \code
      * bool operator() (const Index& row, const Index& col, const Scalar& value) const;
      * \endcode
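      *
      * For instance, with C++11, keeping only the entries of the lower triangular part could be
      * sketched as:
      * \code
      * mat.prune([](const Index& row, const Index& col, const Scalar&) { return row >= col; });
      * \endcode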
      * \sa prune(Scalar,RealScalar)
      */
    template<typename KeepFunc>
    void prune(const KeepFunc& keep = KeepFunc())
    {
      // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
      makeCompressed();

      StorageIndex k = 0;
      for(Index j=0; j<m_outerSize; ++j)
      {
        Index previousStart = m_outerIndex[j];
        m_outerIndex[j] = k;
        Index end = m_outerIndex[j+1];
        for(Index i=previousStart; i<end; ++i)
        {
          if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
          {
            m_data.value(k) = m_data.value(i);
            m_data.index(k) = m_data.index(i);
            ++k;
          }
        }
      }
      m_outerIndex[m_outerSize] = k;
      m_data.resize(k,0);
    }

    /** Resizes the matrix to a \a rows x \a cols matrix leaving old values untouched.
      *
      * If the sizes of the matrix are decreased, then the matrix is turned into \b uncompressed mode
      * and the storage of the out-of-bounds coefficients is kept and reserved.
      * Call makeCompressed() to pack the entries and squeeze extra memory.
      *
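      * A small sketch of growing and shrinking a matrix (the sizes are arbitrary):
      * \code
      * SparseMatrix<double> A(3,3);
      * A.insert(0,0) = 1.0;
      * A.conservativeResize(4,4);   // A(0,0) is preserved
      * A.conservativeResize(2,2);   // out-of-bounds entries stay allocated until makeCompressed()
      * \endcode
      *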
      * \sa reserve(), setZero(), makeCompressed()
      */
    void conservativeResize(Index rows, Index cols)
    {
      // No change
      if (this->rows() == rows && this->cols() == cols) return;

      // If one dimension is zero, then there is nothing to be preserved
      if(rows==0 || cols==0) return resize(rows,cols);

      Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
      Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
      StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);

      // Deals with inner non zeros
      if (m_innerNonZeros)
      {
        // Resize m_innerNonZeros
        StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
        if (!newInnerNonZeros) internal::throw_std_bad_alloc();
        m_innerNonZeros = newInnerNonZeros;

        for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
          m_innerNonZeros[i] = 0;
      }
      else if (innerChange < 0)
      {
        // Inner size decreased: allocate a new m_innerNonZeros
        m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize + outerChange) * sizeof(StorageIndex)));
        if (!m_innerNonZeros) internal::throw_std_bad_alloc();
        for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
          m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
        for(Index i = m_outerSize; i < m_outerSize + outerChange; i++)
          m_innerNonZeros[i] = 0;
      }

      // Change the m_innerNonZeros in case of a decrease of inner size
      if (m_innerNonZeros && innerChange < 0)
      {
        for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
        {
          StorageIndex &n = m_innerNonZeros[i];
          StorageIndex start = m_outerIndex[i];
          while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
        }
      }

      m_innerSize = newInnerSize;

      // Re-allocate outer index structure if necessary
      if (outerChange == 0)
        return;

      StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
      if (!newOuterIndex) internal::throw_std_bad_alloc();
      m_outerIndex = newOuterIndex;
      if (outerChange > 0)
      {
        StorageIndex lastIdx = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
        for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
          m_outerIndex[i] = lastIdx;
      }
      m_outerSize += outerChange;
    }

    /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero.
      *
      * This function does not free the currently allocated memory. To release as much memory as possible,
      * call \code mat.data().squeeze(); \endcode after resizing it.
      *
      * \sa reserve(), setZero()
      */
    void resize(Index rows, Index cols)
    {
      const Index outerSize = IsRowMajor ? rows : cols;
      m_innerSize = IsRowMajor ? cols : rows;
      m_data.clear();
      if (m_outerSize != outerSize || m_outerSize==0)
      {
        std::free(m_outerIndex);
        m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
        if (!m_outerIndex) internal::throw_std_bad_alloc();

        m_outerSize = outerSize;
      }
      if(m_innerNonZeros)
      {
        std::free(m_innerNonZeros);
        m_innerNonZeros = 0;
      }
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
    }

    /** \internal
      * Resize the nonzero vector to \a size */
    void resizeNonZeros(Index size)
    {
      m_data.resize(size);
    }

    /** \returns a const expression of the diagonal coefficients. */
    const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); }

    /** \returns a read-write expression of the diagonal coefficients.
      * \warning If the diagonal entries are written, then all diagonal
      * entries \b must already exist, otherwise an assertion will be raised.
      */
    DiagonalReturnType diagonal() { return DiagonalReturnType(*this); }

    /** Default constructor yielding an empty \c 0 \c x \c 0 matrix */
    inline SparseMatrix()
      : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      resize(0, 0);
    }

    /** Constructs a \a rows \c x \a cols empty matrix */
    inline SparseMatrix(Index rows, Index cols)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      resize(rows, cols);
    }

    /** Constructs a sparse matrix from the sparse expression \a other */
    template<typename OtherDerived>
    inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
      check_template_parameters();
      const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
      if (needToTranspose)
        *this = other.derived();
      else
      {
        #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
          EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
        #endif
        internal::call_assignment_no_alias(*this, other.derived());
      }
    }

    /** Constructs a sparse matrix from the sparse selfadjoint view \a other */
    template<typename OtherDerived, unsigned int UpLo>
    inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      Base::operator=(other);
    }

    /** Copy constructor (it performs a deep copy) */
    inline SparseMatrix(const SparseMatrix& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      *this = other.derived();
    }

    /** \brief Copy constructor with in-place evaluation */
    template<typename OtherDerived>
    SparseMatrix(const ReturnByValue<OtherDerived>& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      initAssignment(other);
      other.evalTo(*this);
    }

    /** \brief Copy constructor with in-place evaluation */
    template<typename OtherDerived>
    explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      *this = other.derived();
    }

    /** Swaps the content of two sparse matrices of the same type.
      * This is a fast operation that simply swaps the underlying pointers and parameters. */
    inline void swap(SparseMatrix& other)
    {
      //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
      std::swap(m_outerIndex, other.m_outerIndex);
      std::swap(m_innerSize, other.m_innerSize);
      std::swap(m_outerSize, other.m_outerSize);
      std::swap(m_innerNonZeros, other.m_innerNonZeros);
      m_data.swap(other.m_data);
    }

    /** Sets *this to the identity matrix.
      * This function also turns the matrix into compressed mode, and drops any reserved memory. */
    inline void setIdentity()
    {
      eigen_assert(rows() == cols() && "ONLY FOR SQUARE MATRICES");
      this->m_data.resize(rows());
      Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1));
      Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes();
      Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
      std::free(m_innerNonZeros);
      m_innerNonZeros = 0;
    }
    inline SparseMatrix& operator=(const SparseMatrix& other)
    {
      if (other.isRValue())
      {
        swap(other.const_cast_derived());
      }
      else if(this!=&other)
      {
        #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
          EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
        #endif
        initAssignment(other);
        if(other.isCompressed())
        {
          internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
          m_data = other.m_data;
        }
        else
        {
          Base::operator=(other);
        }
      }
      return *this;
    }

#ifndef EIGEN_PARSED_BY_DOXYGEN
    template<typename OtherDerived>
    inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
    { return Base::operator=(other.derived()); }

    template<typename Lhs, typename Rhs>
    inline SparseMatrix& operator=(const Product<Lhs,Rhs,AliasFreeProduct>& other);
#endif // EIGEN_PARSED_BY_DOXYGEN

    template<typename OtherDerived>
    EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);

    friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
    {
      EIGEN_DBG_SPARSE(
        s << "Nonzero entries:\n";
        if(m.isCompressed())
        {
          for (Index i=0; i<m.nonZeros(); ++i)
            s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
        }
        else
        {
          for (Index i=0; i<m.outerSize(); ++i)
          {
            Index p = m.m_outerIndex[i];
            Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
            Index k=p;
            for (; k<pe; ++k) {
              s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
            }
            for (; k<m.m_outerIndex[i+1]; ++k) {
              s << "(_,_) ";
            }
          }
        }
        s << std::endl;
        s << std::endl;
        s << "Outer pointers:\n";
        for (Index i=0; i<m.outerSize(); ++i) {
          s << m.m_outerIndex[i] << " ";
        }
        s << " $" << std::endl;
        if(!m.isCompressed())
        {
          s << "Inner non zeros:\n";
          for (Index i=0; i<m.outerSize(); ++i) {
            s << m.m_innerNonZeros[i] << " ";
          }
          s << " $" << std::endl;
        }
        s << std::endl;
      );
      s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
      return s;
    }

    /** Destructor */
    inline ~SparseMatrix()
    {
      std::free(m_outerIndex);
      std::free(m_innerNonZeros);
    }

    /** Overloaded for performance */
    Scalar sum() const;

#   ifdef EIGEN_SPARSEMATRIX_PLUGIN
#     include EIGEN_SPARSEMATRIX_PLUGIN
#   endif

protected:

    template<typename Other>
    void initAssignment(const Other& other)
    {
      resize(other.rows(), other.cols());
      if(m_innerNonZeros)
      {
        std::free(m_innerNonZeros);
        m_innerNonZeros = 0;
      }
    }

    /** \internal
      * \sa insert(Index,Index) */
    EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);

    /** \internal
      * A vector object that is equal to 0 everywhere except for the value \a v at position \a i */
    class SingletonVector
    {
        StorageIndex m_index;
        StorageIndex m_value;
      public:
        typedef StorageIndex value_type;
        SingletonVector(Index i, Index v)
          : m_index(convert_index(i)), m_value(convert_index(v))
        {}

        StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
    };

    /** \internal
      * \sa insert(Index,Index) */
    EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);

public:
    /** \internal
      * \sa insert(Index,Index) */
    EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
    {
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      eigen_assert(!isCompressed());
      eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));

      Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
      m_data.index(p) = convert_index(inner);
      return (m_data.value(p) = Scalar(0));
    }
protected:
    struct IndexPosPair {
      IndexPosPair(Index a_i, Index a_p) : i(a_i), p(a_p) {}
      Index i;
      Index p;
    };

    /** \internal assign \a diagXpr to the diagonal of \c *this
      * There are different strategies:
      *   1 - if *this is overwritten (Func==assign_op) or *this is empty, then we can treat *this as a dense vector expression.
      *   2 - otherwise, for each diagonal coeff,
      *     2.a - if it already exists, then we update it,
      *     2.b - otherwise, if *this is uncompressed and the current inner-vector has room for at least 1 element, then we perform an in-place insertion.
      *     2.c - otherwise, we'll have to reallocate and copy everything, so instead of doing so for each new element, it is recorded in a std::vector.
      *   3 - at the end, if some entries failed to be inserted in-place, then we allocate a new buffer, copy each chunk at the right position, and insert the new elements.
      *
      * TODO: some piece of code could be isolated and reused for a general in-place update strategy.
      * TODO: if we start to defer the insertion of some elements (i.e., case 2.c executed once),
      *       then it *might* be better to disable case 2.b since they will have to be copied anyway.
      */
    template<typename DiagXpr, typename Func>
    void assignDiagonal(const DiagXpr diagXpr, const Func& assignFunc)
    {
      Index n = diagXpr.size();

      const bool overwrite = internal::is_same<Func, internal::assign_op<Scalar,Scalar> >::value;
      if(overwrite)
      {
        if((this->rows()!=n) || (this->cols()!=n))
          this->resize(n, n);
      }

      if(m_data.size()==0 || overwrite)
      {
        typedef Array<StorageIndex,Dynamic,1> ArrayXI;
        this->makeCompressed();
        this->resizeNonZeros(n);
        Eigen::Map<ArrayXI>(this->innerIndexPtr(), n).setLinSpaced(0,StorageIndex(n)-1);
        Eigen::Map<ArrayXI>(this->outerIndexPtr(), n+1).setLinSpaced(0,StorageIndex(n));
        Eigen::Map<Array<Scalar,Dynamic,1> > values = this->coeffs();
        values.setZero();
        internal::call_assignment_no_alias(values, diagXpr, assignFunc);
      }
      else
      {
        bool isComp = isCompressed();
        internal::evaluator<DiagXpr> diaEval(diagXpr);
        std::vector<IndexPosPair> newEntries;

        // 1 - try in-place update and record insertion failures
        for(Index i = 0; i<n; ++i)
        {
          internal::LowerBoundIndex lb = this->lower_bound(i,i);
          Index p = lb.value;
          if(lb.found)
          {
            // the coeff already exists
            assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
          }
          else if((!isComp) && m_innerNonZeros[i] < (m_outerIndex[i+1]-m_outerIndex[i]))
          {
            // non compressed mode with local room for inserting one element
            m_data.moveChunk(p, p+1, m_outerIndex[i]+m_innerNonZeros[i]-p);
            m_innerNonZeros[i]++;
            m_data.value(p) = Scalar(0);
            m_data.index(p) = StorageIndex(i);
            assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
          }
          else
          {
            // defer insertion
            newEntries.push_back(IndexPosPair(i,p));
          }
        }
        // 2 - insert deferred entries
        Index n_entries = Index(newEntries.size());
        if(n_entries>0)
        {
          Storage newData(m_data.size()+n_entries);
          Index prev_p = 0;
          Index prev_i = 0;
          for(Index k=0; k<n_entries;++k)
          {
            Index i = newEntries[k].i;
            Index p = newEntries[k].p;
            internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+p, newData.valuePtr()+prev_p+k);
            internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+p, newData.indexPtr()+prev_p+k);
            for(Index j=prev_i;j<i;++j)
              m_outerIndex[j+1] += k;
            if(!isComp)
              m_innerNonZeros[i]++;
            prev_p = p;
            prev_i = i;
            newData.value(p+k) = Scalar(0);
            newData.index(p+k) = StorageIndex(i);
            assignFunc.assignCoeff(newData.value(p+k), diaEval.coeff(i));
          }
          {
            internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+m_data.size(), newData.valuePtr()+prev_p+n_entries);
            internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+m_data.size(), newData.indexPtr()+prev_p+n_entries);
            for(Index j=prev_i+1;j<=m_outerSize;++j)
              m_outerIndex[j] += n_entries;
          }
          m_data.swap(newData);
        }
      }
    }

private:
  static void check_template_parameters()
  {
    EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
    EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
  }

  struct default_prunning_func {
    default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
    inline bool operator() (const Index&, const Index&, const Scalar& value) const
    {
      return !internal::isMuchSmallerThan(value, reference, epsilon);
    }
    Scalar reference;
    RealScalar epsilon;
  };
};

namespace internal {

template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
{
  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
  typedef typename SparseMatrixType::Scalar Scalar;
  typedef typename SparseMatrixType::StorageIndex StorageIndex;
  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());

  if(begin!=end)
  {
    // pass 1: count the nnz per inner-vector
    typename SparseMatrixType::IndexVector wi(trMat.outerSize());
    wi.setZero();
    for(InputIterator it(begin); it!=end; ++it)
    {
      eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
      wi(IsRowMajor ? it->col() : it->row())++;
    }

    // pass 2: insert all the elements into trMat
    trMat.reserve(wi);
    for(InputIterator it(begin); it!=end; ++it)
      trMat.insertBackUncompressed(it->row(),it->col()) = it->value();

    // pass 3:
    trMat.collapseDuplicates(dup_func);
  }

  // pass 4: transposed copy -> implicit sorting
  mat = trMat;
}

}


/** Fill the matrix \c *this with the list of \em triplets defined by the iterator range \a begin - \a end.
  *
  * A \em triplet is a tuple (i,j,value) defining a non-zero element.
  * The input list of triplets does not have to be sorted, and can contain duplicated elements.
  * In any case, the result is a \b sorted and \b compressed sparse matrix where the duplicates have been summed up.
  * This is an \em O(n) operation, with \em n the number of triplet elements.
  * The initial contents of \c *this is destroyed.
  * The matrix \c *this must be properly resized beforehand using the SparseMatrix(Index,Index) constructor,
  * or the resize(Index,Index) method. The sizes are not extracted from the triplet list.
  *
  * The \a InputIterators value_type must provide the following interface:
  * \code
  * Scalar value() const;       // the value
  * StorageIndex row() const;   // the row index i
  * StorageIndex col() const;   // the column index j
  * \endcode
  * See for instance the Eigen::Triplet template class.
  *
  * Here is a typical usage example:
  * \code
    typedef Triplet<double> T;
    std::vector<T> tripletList;
    tripletList.reserve(estimation_of_entries);
    for(...)
    {
      // ...
      tripletList.push_back(T(i,j,v_ij));
    }
    SparseMatrixType m(rows,cols);
    m.setFromTriplets(tripletList.begin(), tripletList.end());
    // m is ready to go!
  * \endcode
  *
  * \warning The list of triplets is read multiple times (at least twice). Therefore, it is not recommended to define
  * an abstract iterator over a complex data-structure that would be expensive to evaluate. The triplets should rather
  * be explicitly stored into a std::vector for instance.
  */
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename InputIterators>
void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
{
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
}

/** The same as setFromTriplets but when duplicates are met the functor \a dup_func is applied:
  * \code
  * value = dup_func(OldValue, NewValue)
  * \endcode
  * Here is a C++11 example keeping the latest entry only:
  * \code
  * mat.setFromTriplets(triplets.begin(), triplets.end(), [] (const Scalar&,const Scalar &b) { return b; });
  * \endcode
  */
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename InputIterators,typename DupFunctor>
void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
{
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex>, DupFunctor>(begin, end, *this, dup_func);
}

/** \internal */
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename DupFunctor>
void SparseMatrix<Scalar,_Options,_StorageIndex>::collapseDuplicates(DupFunctor dup_func)
{
  eigen_assert(!isCompressed());
  // TODO, in practice we should be able to use m_innerNonZeros for that task
  IndexVector wi(innerSize());
  wi.fill(-1);
  StorageIndex count = 0;
  // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
  for(Index j=0; j<outerSize(); ++j)
  {
    StorageIndex start   = count;
    Index oldEnd  = m_outerIndex[j]+m_innerNonZeros[j];
    for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
    {
      Index i = m_data.index(k);
      if(wi(i)>=start)
      {
        // we have already met this entry => accumulate it
        m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
      }
      else
      {
        m_data.value(count) = m_data.value(k);
        m_data.index(count) = m_data.index(k);
        wi(i) = count;
        ++count;
      }
    }
    m_outerIndex[j] = start;
  }
  m_outerIndex[m_outerSize] = count;

  // turn the matrix into compressed form
  std::free(m_innerNonZeros);
  m_innerNonZeros = 0;
  m_data.resize(m_outerIndex[m_outerSize]);
}

template<typename Scalar, int _Options, typename _StorageIndex>
template<typename OtherDerived>
EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const SparseMatrixBase<OtherDerived>& other)
{
  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)

  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
    EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
  #endif

  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
  if (needToTranspose)
  {
    #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
      EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
    #endif
    // two-pass algorithm:
    //  1 - compute the number of coeffs per dest inner vector
    //  2 - do the actual copy/eval
    // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
    typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
    typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
    typedef internal::evaluator<_OtherCopy> OtherCopyEval;
    OtherCopy otherCopy(other.derived());
    OtherCopyEval otherCopyEval(otherCopy);

    SparseMatrix dest(other.rows(),other.cols());
    Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();

    // pass 1
    // FIXME the above copy could be merged with that pass
    for (Index j=0; j<otherCopy.outerSize(); ++j)
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
        ++dest.m_outerIndex[it.index()];

    // prefix sum
    StorageIndex count = 0;
    IndexVector positions(dest.outerSize());
    for (Index j=0; j<dest.outerSize(); ++j)
    {
      StorageIndex tmp = dest.m_outerIndex[j];
      dest.m_outerIndex[j] = count;
      positions[j] = count;
      count += tmp;
    }
    dest.m_outerIndex[dest.outerSize()] = count;
    // alloc
    dest.m_data.resize(count);
    // pass 2
    for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
    {
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
      {
        Index pos = positions[it.index()]++;
        dest.m_data.index(pos) = j;
        dest.m_data.value(pos) = it.value();
      }
    }
    this->swap(dest);
    return *this;
  }
  else
  {
    if(other.isRValue())
    {
      initAssignment(other.derived());
    }
    // there is no special optimization
    return Base::operator=(other.derived());
  }
}

template<typename _Scalar, int _Options, typename _StorageIndex>
typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insert(Index row, Index col)
{
  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  if(isCompressed())
  {
    if(nonZeros()==0)
    {
      // reserve space if not already done
      if(m_data.allocatedSize()==0)
        m_data.reserve(2*m_innerSize);

      // turn the matrix into non-compressed mode
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      if(!m_innerNonZeros) internal::throw_std_bad_alloc();

      memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));

      // pack all inner-vectors to the end of the pre-allocated space
      // and allocate the entire free-space to the first inner-vector
      StorageIndex end = convert_index(m_data.allocatedSize());
      for(Index j=1; j<=m_outerSize; ++j)
        m_outerIndex[j] = end;
    }
    else
    {
      // turn the matrix into non-compressed mode
      m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
      if(!m_innerNonZeros) internal::throw_std_bad_alloc();
      for(Index j=0; j<m_outerSize; ++j)
        m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
    }
  }

  // check whether we can do a fast "push back" insertion
  Index data_end = m_data.allocatedSize();

  // First case: we are filling a new inner vector which is packed at the end.
  // We assume that all remaining inner-vectors are also empty and packed to the end.
  if(m_outerIndex[outer]==data_end)
  {
    eigen_internal_assert(m_innerNonZeros[outer]==0);

    // pack previous empty inner-vectors to end of the used-space
    // and allocate the entire free-space to the current inner-vector.
    StorageIndex p = convert_index(m_data.size());
    Index j = outer;
    while(j>=0 && m_innerNonZeros[j]==0)
      m_outerIndex[j--] = p;

    // push back the new element
    ++m_innerNonZeros[outer];
    m_data.append(Scalar(0), inner);

    // check for reallocation
    if(data_end != m_data.allocatedSize())
    {
      // m_data has been reallocated
      //  -> move remaining inner-vectors back to the end of the free-space
      //     so that the entire free-space is allocated to the current inner-vector.
      eigen_internal_assert(data_end < m_data.allocatedSize());
      StorageIndex new_end = convert_index(m_data.allocatedSize());
      for(Index k=outer+1; k<=m_outerSize; ++k)
        if(m_outerIndex[k]==data_end)
          m_outerIndex[k] = new_end;
    }
    return m_data.value(p);
  }

  // Second case: the next inner-vector is packed to the end
  // and the current inner-vector's end matches the used-space.
  if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
  {
    eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);

    // add space for the new element
    ++m_innerNonZeros[outer];
    m_data.resize(m_data.size()+1);

    // check for reallocation
    if(data_end != m_data.allocatedSize())
    {
      // m_data has been reallocated
      //  -> move remaining inner-vectors back to the end of the free-space
      //     so that the entire free-space is allocated to the current inner-vector.
      eigen_internal_assert(data_end < m_data.allocatedSize());
      StorageIndex new_end = convert_index(m_data.allocatedSize());
      for(Index k=outer+1; k<=m_outerSize; ++k)
        if(m_outerIndex[k]==data_end)
          m_outerIndex[k] = new_end;
    }

    // and insert it at the right position (sorted insertion)
    Index startId = m_outerIndex[outer];
    Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
    while ( (p > startId) && (m_data.index(p-1) > inner) )
    {
      m_data.index(p) = m_data.index(p-1);
      m_data.value(p) = m_data.value(p-1);
      --p;
    }

    m_data.index(p) = convert_index(inner);
    return (m_data.value(p) = Scalar(0));
  }

  if(m_data.size() != m_data.allocatedSize())
  {
    // make sure the matrix is compatible to random un-compressed insertion:
    m_data.resize(m_data.allocatedSize());
    this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
  }

  return insertUncompressed(row,col);
}

template<typename _Scalar, int _Options, typename _StorageIndex>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertUncompressed(Index row, Index col)
{
  eigen_assert(!isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const StorageIndex inner = convert_index(IsRowMajor ? col : row);

  Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
  StorageIndex innerNNZ = m_innerNonZeros[outer];
  if(innerNNZ>=room)
  {
    // this inner vector is full, we need to reallocate the whole buffer :(
    reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
  }

  Index startId = m_outerIndex[outer];
  Index p = startId + m_innerNonZeros[outer];
  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
    --p;
  }
  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");

  m_innerNonZeros[outer]++;

  m_data.index(p) = inner;
  return (m_data.value(p) = Scalar(0));
}

template<typename _Scalar, int _Options, typename _StorageIndex>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertCompressed(Index row, Index col)
{
  eigen_assert(isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  Index previousOuter = outer;
  if (m_outerIndex[outer+1]==0)
  {
    // we start a new inner vector
    while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
    {
      m_outerIndex[previousOuter] = convert_index(m_data.size());
      --previousOuter;
    }
    m_outerIndex[outer+1] = m_outerIndex[outer];
  }

  // here we have to handle the tricky case where the outerIndex array
  // starts with: [ 0 0 0 0 0 1 ...] and we are inserting into, e.g.,
  // the 2nd inner vector...
  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
                && (std::size_t(m_outerIndex[outer+1]) == m_data.size());

  std::size_t startId = m_outerIndex[outer];
  // FIXME let's make sure sizeof(long int) == sizeof(std::size_t)
  std::size_t p = m_outerIndex[outer+1];
  ++m_outerIndex[outer+1];

  double reallocRatio = 1;
  if (m_data.allocatedSize()<=m_data.size())
  {
    // if there is no preallocated memory, let's reserve a minimum of 32 elements
    if (m_data.size()==0)
    {
      m_data.reserve(32);
    }
    else
    {
      // we need to reallocate the data; to reduce the number of reallocations
      // we use a smart resize algorithm based on the current filling ratio.
      // In addition, we use double to avoid integer overflows.
      double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
      reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
      // furthermore we bound the realloc ratio to:
      //   1) reduce multiple minor reallocs when the matrix is almost filled
      //   2) avoid allocating too much memory when the matrix is almost empty
      reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
    }
  }
  m_data.resize(m_data.size()+1,reallocRatio);

  if (!isLastVec)
  {
    if (previousOuter==-1)
    {
      // oops wrong guess.
      // let's correct the outer offsets
      for (Index k=0; k<=(outer+1); ++k)
        m_outerIndex[k] = 0;
      Index k=outer+1;
      while(m_outerIndex[k]==0)
        m_outerIndex[k++] = 1;
      while (k<=m_outerSize && m_outerIndex[k]!=0)
        m_outerIndex[k++]++;
      p = 0;
      --k;
      k = m_outerIndex[k]-1;
      while (k>0)
      {
        m_data.index(k) = m_data.index(k-1);
        m_data.value(k) = m_data.value(k-1);
        k--;
      }
    }
    else
    {
      // we are not inserting into the last inner vec
      // update outer indices:
      Index j = outer+2;
      while (j<=m_outerSize && m_outerIndex[j]!=0)
        m_outerIndex[j++]++;
      --j;
      // shift data of last vecs:
      Index k = m_outerIndex[j]-1;
      while (k>=Index(p))
      {
        m_data.index(k) = m_data.index(k-1);
        m_data.value(k) = m_data.value(k-1);
        k--;
      }
    }
  }

  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
    --p;
  }

  m_data.index(p) = inner;
  return (m_data.value(p) = Scalar(0));
}

namespace internal {

template<typename _Scalar, int _Options, typename _StorageIndex>
struct evaluator<SparseMatrix<_Scalar,_Options,_StorageIndex> >
  : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > >
{
  typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > > Base;
  typedef SparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType;
  evaluator() : Base() {}
  explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
};

}

} // end namespace Eigen

#endif // EIGEN_SPARSEMATRIX_H