// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_AMBIVECTOR_H
#define EIGEN_AMBIVECTOR_H

namespace Eigen {

namespace internal {

/** \internal
  * Hybrid sparse/dense vector class designed for intensive read-write operations.
  *
  * See BasicSparseLLT and SparseProduct for usage examples.
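  *
  * A minimal usage sketch (the length \c n, the indices and the 0.05 density estimate
  * are arbitrary illustration values):
  * \code
  * AmbiVector<double,int> tmp(n);   // scratch vector of logical length n
  * tmp.init(0.05);                  // low estimated density selects the sparse (linked-list) mode
  * tmp.setZero();
  * tmp.restart();                   // required before writing with a non-increasing index
  * tmp.coeffRef(3) += 1.0;
  * tmp.coeffRef(7) += 2.0;
  * for (AmbiVector<double,int>::Iterator it(tmp); it; ++it)
  *   std::cout << it.index() << " : " << it.value() << "\n";
  * \endcode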
  */
template<typename _Scalar, typename _StorageIndex>
class AmbiVector
{
  public:
    typedef _Scalar Scalar;
    typedef _StorageIndex StorageIndex;
    typedef typename NumTraits<Scalar>::Real RealScalar;

    explicit AmbiVector(Index size)
      : m_buffer(0), m_zero(0), m_size(0), m_end(0), m_allocatedSize(0), m_allocatedElements(0), m_mode(-1)
    {
      resize(size);
    }

    void init(double estimatedDensity);
    void init(int mode);

    Index nonZeros() const;

    /** Specifies a sub-vector to work on */
    void setBounds(Index start, Index end) { m_start = convert_index(start); m_end = convert_index(end); }

    void setZero();

    void restart();
    Scalar& coeffRef(Index i);
    Scalar& coeff(Index i);

    class Iterator;

    ~AmbiVector() { delete[] m_buffer; }

    void resize(Index size)
    {
      if (m_allocatedSize < size)
        reallocate(size);
      m_size = convert_index(size);
    }

    StorageIndex size() const { return m_size; }

  protected:
    StorageIndex convert_index(Index idx)
    {
      return internal::convert_index<StorageIndex>(idx);
    }

    void reallocate(Index size)
    {
      // if the size of the matrix is not too large, let's allocate a bit more than needed so
      // that we can handle a dense vector even in sparse mode.
      delete[] m_buffer;
      if (size<1000)
      {
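        // number of Scalar slots needed to hold `size` ListEl entries (rounded up), so the same
        // buffer can back a full dense vector as well as a linked list with one entry per coefficient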
        Index allocSize = (size * sizeof(ListEl) + sizeof(Scalar) - 1)/sizeof(Scalar);
        m_allocatedElements = convert_index((allocSize*sizeof(Scalar))/sizeof(ListEl));
        m_buffer = new Scalar[allocSize];
      }
      else
      {
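        // for large vectors, allocate only `size` Scalars to save memory; the buffer then holds
        // fewer than `size` ListEl entries, and reallocateSparse() grows it on demand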
        m_allocatedElements = convert_index((size*sizeof(Scalar))/sizeof(ListEl));
        m_buffer = new Scalar[size];
      }
      m_size = convert_index(size);
      m_start = 0;
      m_end = m_size;
    }

    void reallocateSparse()
    {
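      // grow the linked-list storage by roughly 50%, but never beyond one entry per coefficient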
      Index copyElements = m_allocatedElements;
      m_allocatedElements = (std::min)(StorageIndex(m_allocatedElements*1.5),m_size);
      Index allocSize = m_allocatedElements * sizeof(ListEl);
      allocSize = (allocSize + sizeof(Scalar) - 1)/sizeof(Scalar);
      Scalar* newBuffer = new Scalar[allocSize];
      std::memcpy(newBuffer,  m_buffer,  copyElements * sizeof(ListEl));
      delete[] m_buffer;
      m_buffer = newBuffer;
    }

  protected:
    // element type of the linked list
    struct ListEl
    {
      StorageIndex next;
      StorageIndex index;
      Scalar value;
    };

    // used to store data in both modes (reinterpreted as an array of ListEl in sparse mode)
    Scalar* m_buffer;
    Scalar m_zero;
    StorageIndex m_size;
    StorageIndex m_start;
    StorageIndex m_end;
    StorageIndex m_allocatedSize;
    StorageIndex m_allocatedElements;
    StorageIndex m_mode;

    // linked list mode
    StorageIndex m_llStart;
    StorageIndex m_llCurrent;
    StorageIndex m_llSize;
};

/** \returns the number of nonzeros in the current sub-vector */
template<typename _Scalar,typename _StorageIndex>
Index AmbiVector<_Scalar,_StorageIndex>::nonZeros() const
{
  if (m_mode==IsSparse)
    return m_llSize;
  else
    return m_end - m_start;
}

template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::init(double estimatedDensity)
{
  if (estimatedDensity>0.1)
    init(IsDense);
  else
    init(IsSparse);
}

template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::init(int mode)
{
  m_mode = mode;
  // This is only necessary in sparse mode, but we set these unconditionally to avoid some maybe-uninitialized warnings
  // if (m_mode==IsSparse)
  {
    m_llSize = 0;
    m_llStart = -1;
  }
}

/** Must be called whenever we might perform a write access
  * with an index smaller than the previous one.
  *
  * Don't worry, this function is extremely cheap.
  */
template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::restart()
{
  m_llCurrent = m_llStart;
}

/** Set all coefficients of the current subvector to zero */
template<typename _Scalar,typename _StorageIndex>
void AmbiVector<_Scalar,_StorageIndex>::setZero()
{
  if (m_mode==IsDense)
  {
    for (Index i=m_start; i<m_end; ++i)
      m_buffer[i] = Scalar(0);
  }
  else
  {
    eigen_assert(m_mode==IsSparse);
    m_llSize = 0;
    m_llStart = -1;
  }
}

template<typename _Scalar,typename _StorageIndex>
_Scalar& AmbiVector<_Scalar,_StorageIndex>::coeffRef(Index i)
{
  if (m_mode==IsDense)
    return m_buffer[i];
  else
  {
    ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
    // TODO factorize the following code to reduce code generation
    eigen_assert(m_mode==IsSparse);
    if (m_llSize==0)
    {
      // this is the first element
      m_llStart = 0;
      m_llCurrent = 0;
      ++m_llSize;
      llElements[0].value = Scalar(0);
      llElements[0].index = convert_index(i);
      llElements[0].next = -1;
      return llElements[0].value;
    }
    else if (i<llElements[m_llStart].index)
    {
      // this is going to be the new first element of the list
      ListEl& el = llElements[m_llSize];
      el.value = Scalar(0);
      el.index = convert_index(i);
      el.next = m_llStart;
      m_llStart = m_llSize;
      ++m_llSize;
      m_llCurrent = m_llStart;
      return el.value;
    }
    else
    {
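      // scan forward from the cached position (m_llCurrent); a sequence of writes with
      // non-decreasing indices therefore walks the list only once overall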
      StorageIndex nextel = llElements[m_llCurrent].next;
      eigen_assert(i>=llElements[m_llCurrent].index && "you must call restart() before inserting an element with lower or equal index");
      while (nextel >= 0 && llElements[nextel].index<=i)
      {
        m_llCurrent = nextel;
        nextel = llElements[nextel].next;
      }

      if (llElements[m_llCurrent].index==i)
      {
        // the coefficient already exists and we found it!
        return llElements[m_llCurrent].value;
      }
      else
      {
        if (m_llSize>=m_allocatedElements)
        {
          reallocateSparse();
          llElements = reinterpret_cast<ListEl*>(m_buffer);
        }
        eigen_internal_assert(m_llSize<m_allocatedElements && "internal error: overflow in sparse mode");
        // let's insert a new coefficient
        ListEl& el = llElements[m_llSize];
        el.value = Scalar(0);
        el.index = convert_index(i);
        el.next = llElements[m_llCurrent].next;
        llElements[m_llCurrent].next = m_llSize;
        ++m_llSize;
        return el.value;
      }
    }
  }
}

template<typename _Scalar,typename _StorageIndex>
_Scalar& AmbiVector<_Scalar,_StorageIndex>::coeff(Index i)
{
  if (m_mode==IsDense)
    return m_buffer[i];
  else
  {
    ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_buffer);
    eigen_assert(m_mode==IsSparse);
    if ((m_llSize==0) || (i<llElements[m_llStart].index))
    {
      return m_zero;
    }
    else
    {
      Index elid = m_llStart;
      while (elid >= 0 && llElements[elid].index<i)
        elid = llElements[elid].next;

      // elid is either the first element with an index >= i, or -1 if we fell off the end of the list
      if (elid >= 0 && llElements[elid].index==i)
        return llElements[elid].value;
      else
        return m_zero;
    }
  }
}

/** Iterator over the nonzero coefficients */
template<typename _Scalar,typename _StorageIndex>
class AmbiVector<_Scalar,_StorageIndex>::Iterator
{
  public:
    typedef _Scalar Scalar;
    typedef typename NumTraits<Scalar>::Real RealScalar;

    /** Constructor
      * \param vec the vector on which we iterate
      * \param epsilon the minimal value used to prune zero coefficients.
      * In practice, all coefficients having a magnitude smaller than \a epsilon
      * are skipped.
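      *
      * A small usage sketch (the 1e-12 threshold is just an illustrative choice):
      * \code
      * double sum = 0;
      * for (AmbiVector<double,int>::Iterator it(vec, 1e-12); it; ++it)
      *   sum += it.value();
      * \endcode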
      */
    explicit Iterator(const AmbiVector& vec, const RealScalar& epsilon = 0)
      : m_vector(vec)
    {
      using std::abs;
      m_epsilon = epsilon;
      m_isDense = m_vector.m_mode==IsDense;
      if (m_isDense)
      {
        m_currentEl = 0;   // this is to avoid a compilation warning
        m_cachedValue = 0; // this is to avoid a compilation warning
        m_cachedIndex = m_vector.m_start-1;
        ++(*this);
      }
      else
      {
        ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
        m_currentEl = m_vector.m_llStart;
        while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<=m_epsilon)
          m_currentEl = llElements[m_currentEl].next;
        if (m_currentEl<0)
        {
          m_cachedValue = 0; // this is to avoid a compilation warning
          m_cachedIndex = -1;
        }
        else
        {
          m_cachedIndex = llElements[m_currentEl].index;
          m_cachedValue = llElements[m_currentEl].value;
        }
      }
    }

    StorageIndex index() const { return m_cachedIndex; }
    Scalar value() const { return m_cachedValue; }

    operator bool() const { return m_cachedIndex>=0; }

    Iterator& operator++()
    {
      using std::abs;
      if (m_isDense)
      {
        do {
          ++m_cachedIndex;
        } while (m_cachedIndex<m_vector.m_end && abs(m_vector.m_buffer[m_cachedIndex])<=m_epsilon);
        if (m_cachedIndex<m_vector.m_end)
          m_cachedValue = m_vector.m_buffer[m_cachedIndex];
        else
          m_cachedIndex=-1;
      }
      else
      {
        ListEl* EIGEN_RESTRICT llElements = reinterpret_cast<ListEl*>(m_vector.m_buffer);
        do {
          m_currentEl = llElements[m_currentEl].next;
        } while (m_currentEl>=0 && abs(llElements[m_currentEl].value)<=m_epsilon);
        if (m_currentEl<0)
        {
          m_cachedIndex = -1;
        }
        else
        {
          m_cachedIndex = llElements[m_currentEl].index;
          m_cachedValue = llElements[m_currentEl].value;
        }
      }
      return *this;
    }

  protected:
    const AmbiVector& m_vector; // the target vector
    StorageIndex m_currentEl;   // the current element in sparse/linked-list mode
    RealScalar m_epsilon;       // epsilon used to prune zero coefficients
    StorageIndex m_cachedIndex; // current coordinate
    Scalar m_cachedValue;       // current value
    bool m_isDense;             // mode of the vector
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_AMBIVECTOR_H