// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSE_BLOCK_H
#define EIGEN_SPARSE_BLOCK_H

namespace Eigen {

// Subset of columns or rows
template<typename XprType, int BlockRows, int BlockCols>
class BlockImpl<XprType,BlockRows,BlockCols,true,Sparse>
  : public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,true> >
{
    typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;
    typedef Block<XprType, BlockRows, BlockCols, true> BlockType;
public:
    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
protected:
    enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
    typedef SparseMatrixBase<BlockType> Base;
    using Base::convert_index;
public:
    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)

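    /** Column or Row constructor
      */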
    inline BlockImpl(XprType& xpr, Index i)
      : m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize)
    {}

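    /** Dynamic-size constructor
      */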
    inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
      : m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols))
    {}

    EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
    EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }

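    // Count the non-zeros exactly by iterating over every entry of the covered outer vectors.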
    Index nonZeros() const
    {
      typedef internal::evaluator<XprType> EvaluatorType;
      EvaluatorType matEval(m_matrix);
      Index nnz = 0;
      Index end = m_outerStart + m_outerSize.value();
      for(Index j=m_outerStart; j<end; ++j)
        for(typename EvaluatorType::InnerIterator it(matEval, j); it; ++it)
          ++nnz;
      return nnz;
    }

    inline const Scalar coeff(Index row, Index col) const
    {
      return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
    }

    inline const Scalar coeff(Index index) const
    {
      return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
    }

    inline const XprType& nestedExpression() const { return m_matrix; }
    inline XprType& nestedExpression() { return m_matrix; }
    Index startRow() const { return IsRowMajor ? m_outerStart : 0; }
    Index startCol() const { return IsRowMajor ? 0 : m_outerStart; }
    Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
    Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }

  protected:

    typename internal::ref_selector<XprType>::non_const_type m_matrix;
    Index m_outerStart;
    const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;

  protected:
    // Disable assignment with clear error message.
    // Note that simply removing operator= yields compilation errors with ICC+MSVC
    template<typename T>
    BlockImpl& operator=(const T&)
    {
      EIGEN_STATIC_ASSERT(sizeof(T)==0, THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY);
      return *this;
    }
};


/***************************************************************************
* specialization for SparseMatrix
***************************************************************************/

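// An inner-panel block of a SparseMatrix (a contiguous range of outer vectors,
// e.g. mat.middleCols(j,n) for a column-major matrix) shares the matrix's
// compressed storage and is therefore writable. Illustrative usage:
//   SparseMatrix<double> A(10,10), B(10,3);
//   A.middleCols(2,3) = B; // handled by sparse_matrix_block_impl::operator=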
namespace internal {

template<typename SparseMatrixType, int BlockRows, int BlockCols>
class sparse_matrix_block_impl
  : public SparseCompressedBase<Block<SparseMatrixType,BlockRows,BlockCols,true> >
{
    typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _MatrixTypeNested;
    typedef Block<SparseMatrixType, BlockRows, BlockCols, true> BlockType;
    typedef SparseCompressedBase<Block<SparseMatrixType,BlockRows,BlockCols,true> > Base;
    using Base::convert_index;
public:
    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
protected:
    typedef typename Base::IndexVector IndexVector;
    enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
public:

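    /** Column or Row constructor
      */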
    inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index i)
      : m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize)
    {}

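    /** Dynamic-size constructor
      */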
    inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
      : m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols))
    {}

    template<typename OtherDerived>
    inline BlockType& operator=(const SparseMatrixBase<OtherDerived>& other)
    {
      typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _NestedMatrixType;
      _NestedMatrixType& matrix = m_matrix;
      // Note: this assignment is slow if the target block is non-empty and/or does not
      // sit at the end of the non-zeros of the underlying matrix, because the trailing
      // entries must then be shifted.

      // 1 - eval to a temporary to avoid transposition and/or aliasing issues
      Ref<const SparseMatrix<Scalar, IsRowMajor ? RowMajor : ColMajor, StorageIndex> > tmp(other.derived());
      eigen_internal_assert(tmp.outerSize()==m_outerSize.value());

      // 2 - let's check whether there is enough allocated memory
      Index nnz           = tmp.nonZeros();
      Index start         = m_outerStart==0 ? 0 : m_matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block
      Index end           = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]; // ending position of the current block
      Index block_size    = end - start;                                                // available room in the current block
      Index tail_size     = m_matrix.outerIndexPtr()[m_matrix.outerSize()] - end;

      Index free_size     = m_matrix.isCompressed()
                          ? Index(matrix.data().allocatedSize()) + block_size
                          : block_size;

      Index tmp_start = tmp.outerIndexPtr()[0];

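      // 3 - copy tmp into the block: reallocate the whole storage if it does not fit,
      //     otherwise shift the tail and copy in place.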
      bool update_trailing_pointers = false;
      if(nnz>free_size)
      {
        // realloc manually to reduce copies
        typename SparseMatrixType::Storage newdata(m_matrix.data().allocatedSize() - block_size + nnz);

        internal::smart_copy(m_matrix.valuePtr(),       m_matrix.valuePtr() + start,      newdata.valuePtr());
        internal::smart_copy(m_matrix.innerIndexPtr(),  m_matrix.innerIndexPtr() + start, newdata.indexPtr());

        internal::smart_copy(tmp.valuePtr() + tmp_start,      tmp.valuePtr() + tmp_start + nnz,       newdata.valuePtr() + start);
        internal::smart_copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz,  newdata.indexPtr() + start);

        internal::smart_copy(matrix.valuePtr()+end,       matrix.valuePtr()+end + tail_size,      newdata.valuePtr()+start+nnz);
        internal::smart_copy(matrix.innerIndexPtr()+end,  matrix.innerIndexPtr()+end + tail_size, newdata.indexPtr()+start+nnz);

        newdata.resize(m_matrix.outerIndexPtr()[m_matrix.outerSize()] - block_size + nnz);

        matrix.data().swap(newdata);

        update_trailing_pointers = true;
      }
      else
      {
        if(m_matrix.isCompressed() && nnz!=block_size)
        {
          // no need to realloc, simply copy the tail at its respective position and insert tmp
          matrix.data().resize(start + nnz + tail_size);

          internal::smart_memmove(matrix.valuePtr()+end,      matrix.valuePtr() + end+tail_size,      matrix.valuePtr() + start+nnz);
          internal::smart_memmove(matrix.innerIndexPtr()+end, matrix.innerIndexPtr() + end+tail_size, matrix.innerIndexPtr() + start+nnz);

          update_trailing_pointers = true;
        }

        internal::smart_copy(tmp.valuePtr() + tmp_start,      tmp.valuePtr() + tmp_start + nnz,       matrix.valuePtr() + start);
        internal::smart_copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz,  matrix.innerIndexPtr() + start);
      }

      // update outer index pointers and innerNonZeros
      if(IsVectorAtCompileTime)
      {
        if(!m_matrix.isCompressed())
          matrix.innerNonZeroPtr()[m_outerStart] = StorageIndex(nnz);
        matrix.outerIndexPtr()[m_outerStart] = StorageIndex(start);
      }
      else
      {
        StorageIndex p = StorageIndex(start);
        for(Index k=0; k<m_outerSize.value(); ++k)
        {
          StorageIndex nnz_k = internal::convert_index<StorageIndex>(tmp.innerVector(k).nonZeros());
          if(!m_matrix.isCompressed())
            matrix.innerNonZeroPtr()[m_outerStart+k] = nnz_k;
          matrix.outerIndexPtr()[m_outerStart+k] = p;
          p += nnz_k;
        }
      }

      if(update_trailing_pointers)
      {
        StorageIndex offset = internal::convert_index<StorageIndex>(nnz - block_size);
        for(Index k = m_outerStart + m_outerSize.value(); k<=matrix.outerSize(); ++k)
        {
          matrix.outerIndexPtr()[k] += offset;
        }
      }

      return derived();
    }

    inline BlockType& operator=(const BlockType& other)
    {
      return operator=<BlockType>(other);
    }

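    // The value and inner-index buffers are shared with the underlying matrix;
    // only the outer-index (and inner-nonzero) pointers are shifted to the block's start.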
    inline const Scalar* valuePtr() const
    { return m_matrix.valuePtr(); }
    inline Scalar* valuePtr()
    { return m_matrix.valuePtr(); }

    inline const StorageIndex* innerIndexPtr() const
    { return m_matrix.innerIndexPtr(); }
    inline StorageIndex* innerIndexPtr()
    { return m_matrix.innerIndexPtr(); }

    inline const StorageIndex* outerIndexPtr() const
    { return m_matrix.outerIndexPtr() + m_outerStart; }
    inline StorageIndex* outerIndexPtr()
    { return m_matrix.outerIndexPtr() + m_outerStart; }

    inline const StorageIndex* innerNonZeroPtr() const
    { return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); }
    inline StorageIndex* innerNonZeroPtr()
    { return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); }

    bool isCompressed() const { return m_matrix.innerNonZeroPtr()==0; }

    inline Scalar& coeffRef(Index row, Index col)
    {
      return m_matrix.coeffRef(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
    }

    inline const Scalar coeff(Index row, Index col) const
    {
      return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
    }

    inline const Scalar coeff(Index index) const
    {
      return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
    }

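    // Direct access to the last non-zero coefficient of this block (compile-time vectors only).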
    const Scalar& lastCoeff() const
    {
      EIGEN_STATIC_ASSERT_VECTOR_ONLY(sparse_matrix_block_impl);
      eigen_assert(Base::nonZeros()>0);
      if(m_matrix.isCompressed())
        return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1];
      else
        return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1];
    }

    EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
    EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }

    inline const SparseMatrixType& nestedExpression() const { return m_matrix; }
    inline SparseMatrixType& nestedExpression() { return m_matrix; }
    Index startRow() const { return IsRowMajor ? m_outerStart : 0; }
    Index startCol() const { return IsRowMajor ? 0 : m_outerStart; }
    Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
    Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }

  protected:

    typename internal::ref_selector<SparseMatrixType>::non_const_type m_matrix;
    Index m_outerStart;
    const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;

};

} // namespace internal

template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
class BlockImpl<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true,Sparse>
  : public internal::sparse_matrix_block_impl<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols>
{
public:
  typedef _StorageIndex StorageIndex;
  typedef SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType;
  typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base;
  inline BlockImpl(SparseMatrixType& xpr, Index i)
    : Base(xpr, i)
  {}

  inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
    : Base(xpr, startRow, startCol, blockRows, blockCols)
  {}

  using Base::operator=;
};

template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
class BlockImpl<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true,Sparse>
  : public internal::sparse_matrix_block_impl<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols>
{
public:
  typedef _StorageIndex StorageIndex;
  typedef const SparseMatrix<_Scalar, _Options, _StorageIndex> SparseMatrixType;
  typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base;
  inline BlockImpl(SparseMatrixType& xpr, Index i)
    : Base(xpr, i)
  {}

  inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
    : Base(xpr, startRow, startCol, blockRows, blockCols)
  {}

  using Base::operator=;
private:
  template<typename Derived> BlockImpl(const SparseMatrixBase<Derived>& xpr, Index i);
  template<typename Derived> BlockImpl(const SparseMatrixBase<Derived>& xpr);
};

//----------

/** Generic implementation of sparse Block expression.
  * Read-only.
  */
template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
class BlockImpl<XprType,BlockRows,BlockCols,InnerPanel,Sparse>
  : public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,InnerPanel> >, internal::no_assignment_operator
{
    typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
    typedef SparseMatrixBase<BlockType> Base;
    using Base::convert_index;
public:
    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)

    typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;

    /** Column or Row constructor
      */
    inline BlockImpl(XprType& xpr, Index i)
      : m_matrix(xpr),
        m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? convert_index(i) : 0),
        m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? convert_index(i) : 0),
        m_blockRows(BlockRows==1 ? 1 : xpr.rows()),
        m_blockCols(BlockCols==1 ? 1 : xpr.cols())
    {}

    /** Dynamic-size constructor
      */
    inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
      : m_matrix(xpr), m_startRow(convert_index(startRow)), m_startCol(convert_index(startCol)), m_blockRows(convert_index(blockRows)), m_blockCols(convert_index(blockCols))
    {}

    inline Index rows() const { return m_blockRows.value(); }
    inline Index cols() const { return m_blockCols.value(); }

    inline Scalar& coeffRef(Index row, Index col)
    {
      return m_matrix.coeffRef(row + m_startRow.value(), col + m_startCol.value());
    }

    inline const Scalar coeff(Index row, Index col) const
    {
      return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value());
    }

    inline Scalar& coeffRef(Index index)
    {
      return m_matrix.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
                               m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
    }

    inline const Scalar coeff(Index index) const
    {
      return m_matrix.coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
                            m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
    }

    inline const XprType& nestedExpression() const { return m_matrix; }
    inline XprType& nestedExpression() { return m_matrix; }
    Index startRow() const { return m_startRow.value(); }
    Index startCol() const { return m_startCol.value(); }
    Index blockRows() const { return m_blockRows.value(); }
    Index blockCols() const { return m_blockCols.value(); }

  protected:
//     friend class internal::GenericSparseBlockInnerIteratorImpl<XprType,BlockRows,BlockCols,InnerPanel>;
    friend struct internal::unary_evaluator<Block<XprType,BlockRows,BlockCols,InnerPanel>, internal::IteratorBased, Scalar >;

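    // The exact number of non-zeros is unknown without iterating, so return Dynamic
    // as a sentinel; see nonZerosEstimate() in the evaluator below.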
    Index nonZeros() const { return Dynamic; }

    typename internal::ref_selector<XprType>::non_const_type m_matrix;
    const internal::variable_if_dynamic<Index, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
    const internal::variable_if_dynamic<Index, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
    const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_blockRows;
    const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_blockCols;

  protected:
    // Disable assignment with clear error message.
    // Note that simply removing operator= yields compilation errors with ICC+MSVC
    template<typename T>
    BlockImpl& operator=(const T&)
    {
      EIGEN_STATIC_ASSERT(sizeof(T)==0, THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY);
      return *this;
    }

};

namespace internal {

template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
struct unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased >
 : public evaluator_base<Block<ArgType,BlockRows,BlockCols,InnerPanel> >
{
    class InnerVectorInnerIterator;
    class OuterVectorInnerIterator;
  public:
    typedef Block<ArgType,BlockRows,BlockCols,InnerPanel> XprType;
    typedef typename XprType::StorageIndex StorageIndex;
    typedef typename XprType::Scalar Scalar;

    enum {
      IsRowMajor = XprType::IsRowMajor,

      OuterVector =  (BlockCols==1 && ArgType::IsRowMajor)
                    | // FIXME | instead of || to please GCC 4.4.0 stupid warning "suggest parentheses around &&".
                      // revert to || as soon as not needed anymore.
                     (BlockRows==1 && !ArgType::IsRowMajor),

      CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
      Flags = XprType::Flags
    };

    typedef typename internal::conditional<OuterVector,OuterVectorInnerIterator,InnerVectorInnerIterator>::type InnerIterator;

    explicit unary_evaluator(const XprType& op)
      : m_argImpl(op.nestedExpression()), m_block(op)
    {}

    inline Index nonZerosEstimate() const {
      const Index nnz = m_block.nonZeros();
      if(nnz < 0) {
        // Scale the non-zero estimate for the underlying expression linearly with block size.
        // Return zero if the underlying block is empty.
        const Index nested_sz = m_block.nestedExpression().size();
        return nested_sz == 0 ? 0 : m_argImpl.nonZerosEstimate() * m_block.size() / nested_sz;
      }
      return nnz;
    }

  protected:
    typedef typename evaluator<ArgType>::InnerIterator EvalIterator;

    evaluator<ArgType> m_argImpl;
    const XprType &m_block;
};

template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::InnerVectorInnerIterator
 : public EvalIterator
{
  // NOTE MSVC fails to compile if we don't explicitly "import" IsRowMajor from unary_evaluator
  //      because the base class EvalIterator has a private IsRowMajor enum too. (bug #1786)
  // NOTE We cannot call it IsRowMajor because it would shadow unary_evaluator::IsRowMajor
  enum { XprIsRowMajor = unary_evaluator::IsRowMajor };
  const XprType& m_block;
  Index m_end;
public:

  EIGEN_STRONG_INLINE InnerVectorInnerIterator(const unary_evaluator& aEval, Index outer)
    : EvalIterator(aEval.m_argImpl, outer + (XprIsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol())),
      m_block(aEval.m_block),
      m_end(XprIsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows())
  {
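    // Skip the entries located before the inner start of the block.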
    while( (EvalIterator::operator bool()) && (EvalIterator::index() < (XprIsRowMajor ? m_block.startCol() : m_block.startRow())) )
      EvalIterator::operator++();
  }

  inline StorageIndex index() const { return EvalIterator::index() - convert_index<StorageIndex>(XprIsRowMajor ? m_block.startCol() : m_block.startRow()); }
  inline Index outer()  const { return EvalIterator::outer() - (XprIsRowMajor ? m_block.startRow() : m_block.startCol()); }
  inline Index row()    const { return EvalIterator::row()   - m_block.startRow(); }
  inline Index col()    const { return EvalIterator::col()   - m_block.startCol(); }

  inline operator bool() const { return EvalIterator::operator bool() && EvalIterator::index() < m_end; }
};

template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::OuterVectorInnerIterator
{
  // NOTE see above
  enum { XprIsRowMajor = unary_evaluator::IsRowMajor };
  const unary_evaluator& m_eval;
  Index m_outerPos;
  const Index m_innerIndex;
  Index m_end;
  EvalIterator m_it;
public:

  EIGEN_STRONG_INLINE OuterVectorInnerIterator(const unary_evaluator& aEval, Index outer)
    : m_eval(aEval),
      m_outerPos( (XprIsRowMajor ? aEval.m_block.startCol() : aEval.m_block.startRow()) ),
      m_innerIndex(XprIsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol()),
      m_end(XprIsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows()),
      m_it(m_eval.m_argImpl, m_outerPos)
  {
    EIGEN_UNUSED_VARIABLE(outer);
    eigen_assert(outer==0);

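    // Position m_it on the entry with inner index m_innerIndex in the first outer
    // vector; if no such entry exists there, operator++ moves on to the next one.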
    while(m_it && m_it.index() < m_innerIndex) ++m_it;
    if((!m_it) || (m_it.index()!=m_innerIndex))
      ++(*this);
  }

  inline StorageIndex index() const { return convert_index<StorageIndex>(m_outerPos - (XprIsRowMajor ? m_eval.m_block.startCol() : m_eval.m_block.startRow())); }
  inline Index outer()  const { return 0; }
  inline Index row()    const { return XprIsRowMajor ? 0 : index(); }
  inline Index col()    const { return XprIsRowMajor ? index() : 0; }

  inline Scalar value() const { return m_it.value(); }
  inline Scalar& valueRef() { return m_it.valueRef(); }

  inline OuterVectorInnerIterator& operator++()
  {
    // search next non-zero entry
    while(++m_outerPos<m_end)
    {
      // Restart iterator at the next inner-vector:
      m_it.~EvalIterator();
      ::new (&m_it) EvalIterator(m_eval.m_argImpl, m_outerPos);
      // search for the key m_innerIndex in the current outer-vector
      while(m_it && m_it.index() < m_innerIndex) ++m_it;
      if(m_it && m_it.index()==m_innerIndex) break;
    }
    return *this;
  }

  inline operator bool() const { return m_outerPos < m_end; }
};

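// Inner-panel blocks of a SparseMatrix expose compressed storage directly,
// so their evaluators can simply reuse the one of SparseCompressedBase.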
template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
struct unary_evaluator<Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true>, IteratorBased>
  : evaluator<SparseCompressedBase<Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> > >
{
  typedef Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> XprType;
  typedef evaluator<SparseCompressedBase<XprType> > Base;
  explicit unary_evaluator(const XprType &xpr) : Base(xpr) {}
};

template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
struct unary_evaluator<Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true>, IteratorBased>
  : evaluator<SparseCompressedBase<Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> > >
{
  typedef Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> XprType;
  typedef evaluator<SparseCompressedBase<XprType> > Base;
  explicit unary_evaluator(const XprType &xpr) : Base(xpr) {}
};

} // end namespace internal


} // end namespace Eigen

#endif // EIGEN_SPARSE_BLOCK_H