/* Sparse_Row class implementation (non-inline functions).
   Copyright (C) 2001-2010 Roberto Bagnara <bagnara@cs.unipr.it>
   Copyright (C) 2010-2016 BUGSENG srl (http://bugseng.com)

This file is part of the Parma Polyhedra Library (PPL).

The PPL is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3 of the License, or (at your
option) any later version.

The PPL is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software Foundation,
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111-1307, USA.

For the most up-to-date information see the Parma Polyhedra Library
site: http://bugseng.com/products/ppl/ . */

#include "ppl-config.h"
#include "Sparse_Row_defs.hh"
#include "Dense_Row_defs.hh"

namespace PPL = Parma_Polyhedra_Library;

namespace {

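// Helper for the construction of a Sparse_Row from a Dense_Row: an
// input-iterator-like class that visits the nonzero coefficients among the
// first sz elements of a Dense_Row, exposing index() and operator*() so
// that the underlying tree can be built directly from a dense row.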
class Sparse_Row_from_Dense_Row_helper_iterator {
public:
  Sparse_Row_from_Dense_Row_helper_iterator(const PPL::Dense_Row& r,
                                            PPL::dimension_type sz)
    : row(r), sz(sz), idx(0) {
    if (row.size() != 0 && row[0] == 0) {
      ++(*this);
    }
  }

  Sparse_Row_from_Dense_Row_helper_iterator& operator++() {
    PPL_ASSERT(idx < sz);
    ++idx;
    while (idx < sz && row[idx] == 0) {
      ++idx;
    }
    return *this;
  }

  Sparse_Row_from_Dense_Row_helper_iterator operator++(int) {
    Sparse_Row_from_Dense_Row_helper_iterator tmp = *this;
    ++(*this);
    return tmp;
  }

  PPL::Coefficient_traits::const_reference
  operator*() const {
    PPL_ASSERT(idx < sz);
    return row[idx];
  }

  PPL::dimension_type
  index() const {
    PPL_ASSERT(idx < sz);
    return idx;
  }

  bool
  operator==(const Sparse_Row_from_Dense_Row_helper_iterator& itr) const {
    PPL_ASSERT(&row == &(itr.row));
    return idx == itr.idx;
  }

  bool
  operator!=(const Sparse_Row_from_Dense_Row_helper_iterator& itr) const {
    return !(*this == itr);
  }

private:
  const PPL::Dense_Row& row;
  PPL::dimension_type sz;
  PPL::dimension_type idx;
};

// Returns the number of nonzero elements in row.
PPL::dimension_type
Sparse_Row_from_Dense_Row_helper_function(const PPL::Dense_Row& row,
                                          PPL::dimension_type sz) {
  PPL::dimension_type count = 0;
  for (PPL::dimension_type i = sz; i-- > 0; ) {
    if (row[i] != 0) {
      ++count;
    }
  }
  return count;
}

} // namespace

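// Builds a Sparse_Row with the same size as row, storing only its nonzero
// coefficients.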
PPL::Sparse_Row::Sparse_Row(const PPL::Dense_Row& row)
  : tree(Sparse_Row_from_Dense_Row_helper_iterator(row, row.size()),
         Sparse_Row_from_Dense_Row_helper_function(row, row.size())),
    size_(row.size()) {
  PPL_ASSERT(OK());
}

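// As above, but the resulting row has size sz; the capacity argument is
// ignored.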
PPL::Sparse_Row::Sparse_Row(const Dense_Row& row, dimension_type sz,
                            dimension_type capacity)
  : tree(Sparse_Row_from_Dense_Row_helper_iterator(row, row.size()),
         Sparse_Row_from_Dense_Row_helper_function(row, row.size())),
    size_(sz) {
  (void)capacity;
  PPL_ASSERT(OK());
}

PPL::Sparse_Row&
PPL::Sparse_Row::operator=(const PPL::Dense_Row& row) {
  Sparse_Row tmp(row);
  swap(*this, tmp);
  PPL_ASSERT(OK());

  return *this;
}

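// Exchanges the coefficients at indexes i and j, handling every combination
// of stored and non-stored (i.e. zero) elements.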
void
PPL::Sparse_Row::swap_coefficients(dimension_type i, dimension_type j) {
  PPL_ASSERT(i < size_);
  PPL_ASSERT(j < size_);

  if (tree.empty()) {
    return;
  }

  using std::swap;

  iterator itr_i = tree.bisect(i);
  iterator itr_j = tree.bisect(j);
  if (itr_i.index() == i) {
    if (itr_j.index() == j) {
      // Both elements are in the tree.
      swap(*itr_i, *itr_j);
    }
    else {
      // i is in the tree, j is not.
      PPL_DIRTY_TEMP_COEFFICIENT(tmp);
      swap(*itr_i, tmp);
      tree.erase(itr_i);
      // Now both iterators have been invalidated.
      itr_j = tree.insert(j);
      swap(*itr_j, tmp);
    }
  }
  else {
    if (itr_j.index() == j) {
      // j is in the tree, i is not.
      PPL_DIRTY_TEMP_COEFFICIENT(tmp);
      swap(*itr_j, tmp);
      // Now both iterators have been invalidated.
      tree.erase(itr_j);
      itr_i = tree.insert(i);
      swap(*itr_i, tmp);
    }
    else {
      // Do nothing, elements are both non-stored zeroes.
    }
  }
}

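// Resets to zero (i.e. removes from the tree) all the elements in the range
// [first, last), returning an iterator to the element following the erased
// range.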
PPL::Sparse_Row::iterator
PPL::Sparse_Row::reset(iterator first, iterator last) {
  if (first == last) {
    return first;
  }

  PPL_ASSERT(last != end());
  --last;
  const dimension_type j = last.index();
  PPL_ASSERT(first.index() <= j);
  // We can't just compare first and last at each iteration, because last will
  // be invalidated by the first erase.
  while (first.index() < j) {
    first = reset(first);
  }

  first = reset(first);

  PPL_ASSERT(OK());
  return first;
}

void
PPL::Sparse_Row::reset_after(dimension_type i) {
  PPL_ASSERT(i < size_);

  iterator itr = lower_bound(i);
  // This is a const reference to an internal iterator that is kept valid.
  // If we stored a copy instead, it would be invalidated by the calls to
  // reset().
  const iterator& itr_end = end();

  while (itr != itr_end) {
    itr = reset(itr);
  }

  PPL_ASSERT(OK());
}

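// Divides all the stored coefficients by their GCD, so that afterwards they
// have no common factor. Rows whose coefficients are all zero, and rows
// whose GCD is already 1, are left unchanged.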
void
PPL::Sparse_Row::normalize() {
  // Compute the GCD of all the coefficients.
  PPL_DIRTY_TEMP_COEFFICIENT(gcd);
  const_iterator i = begin();
  const_iterator i_end = end();
  for ( ; i != i_end; ++i) {
    Coefficient_traits::const_reference x_i = *i;
    if (const int x_i_sign = sgn(x_i)) {
      gcd = x_i;
      if (x_i_sign < 0) {
        neg_assign(gcd);
      }
      goto compute_gcd;
    }
  }
  // We reach this point only if all the coefficients were zero.
  return;

 compute_gcd:
  if (gcd == 1) {
    return;
  }
  for (++i; i != i_end; ++i) {
    Coefficient_traits::const_reference x_i = *i;
    if (x_i != 0) {
      // Note: we use the ternary version instead of a more concise
      // gcd_assign(gcd, x_i) to take advantage of the fact that
      // `gcd' will decrease very rapidly (see D. Knuth, The Art of
      // Computer Programming, second edition, Section 4.5.2,
      // Algorithm C, and the discussion following it).  Our
      // implementation of gcd_assign(x, y, z) for checked numbers is
      // optimized for the case where `z' is smaller than `y', so that
      // on checked numbers we gain.  On the other hand, for the
      // implementation of gcd_assign(x, y, z) on GMP's unbounded
      // integers we cannot make any assumption, so here we draw.
      // Overall, we win.
      gcd_assign(gcd, x_i, gcd);
      if (gcd == 1) {
        return;
      }
    }
  }
  // Divide the coefficients by the GCD.
  for (iterator j = begin(), j_end = end(); j != j_end; ++j) {
    Coefficient& x_j = *j;
    exact_div_assign(x_j, x_j, gcd);
  }

  PPL_ASSERT(OK());
}

namespace {

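// Helper used by Sparse_Row::linear_combine() when the tree is rebuilt from
// scratch: iterates over the union of the indexes stored in x and y,
// yielding at each position k the combined value coeff1 * x[k] + coeff2 * y[k]
// (non-stored elements count as zero).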
class sparse_row_linear_combine_helper_iterator {
public:
  sparse_row_linear_combine_helper_iterator(
    const PPL::Sparse_Row& x, const PPL::Sparse_Row& y,
    PPL::Coefficient_traits::const_reference coeff1_1,
    PPL::Coefficient_traits::const_reference coeff2_1)
    : coeff1(coeff1_1), coeff2(coeff2_1) {
    i = x.begin();
    i_end = x.end();
    j = y.begin();
    j_end = y.end();
    update_current_data();
  }

  void operator++() {
    if (from_i) {
      ++i;
    }
    if (from_j) {
      ++j;
    }
    update_current_data();
  }

  PPL::Coefficient_traits::const_reference operator*() {
    return current_value;
  }

  PPL::dimension_type index() {
    return current_index;
  }

private:
  void update_current_data() {
    if (i == i_end) {
      if (j == j_end) {
        return;
      }
      else {
        // i == i_end, j != j_end, so use j.
        current_index = j.index();
        current_value = *j;
        current_value *= coeff2;
        from_i = false;
        from_j = true;
      }
    }
    else {
      if (j == j_end) {
        // i != i_end, j == j_end, so use i.
        current_index = i.index();
        current_value = *i;
        current_value *= coeff1;
        from_i = true;
        from_j = false;
      }
      else {
        // i != i_end and j != j_end.
        if (i.index() < j.index()) {
          // i.index() < j.index(), so use i.
          current_index = i.index();
          current_value = *i;
          current_value *= coeff1;
          from_i = true;
          from_j = false;
        }
        else {
          if (i.index() != j.index()) {
            PPL_ASSERT(i.index() > j.index());
            // i.index() > j.index(), so use j.
            current_index = j.index();
            current_value = *j;
            current_value *= coeff2;
            from_i = false;
            from_j = true;
          }
          else {
            // i.index() == j.index(), so use both i and j.
            current_index = i.index();
            current_value = *i;
            current_value *= coeff1;
            PPL::add_mul_assign(current_value, *j, coeff2);
            from_i = true;
            from_j = true;
          }
        }
      }
    }
    PPL_ASSERT(!from_i || i != i_end);
    PPL_ASSERT(!from_j || j != j_end);
  }

  PPL::Coefficient_traits::const_reference coeff1;
  PPL::Coefficient_traits::const_reference coeff2;
  PPL::Sparse_Row::const_iterator i;
  PPL::Sparse_Row::const_iterator i_end;
  PPL::Sparse_Row::const_iterator j;
  PPL::Sparse_Row::const_iterator j_end;
  PPL::dimension_type current_index;
  PPL::Coefficient current_value;
  bool from_i;
  bool from_j;
};

} // namespace

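// Assigns coeff1 * (*this) + coeff2 * y to *this, without storing the
// resulting zero coefficients. Depending on how many new elements would
// have to be inserted, the tree is either updated in place or rebuilt from
// scratch using the helper iterator above.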
void
PPL::Sparse_Row::linear_combine(const Sparse_Row& y,
                                Coefficient_traits::const_reference coeff1,
                                Coefficient_traits::const_reference coeff2) {
  PPL_ASSERT(coeff1 != 0);
  PPL_ASSERT(coeff2 != 0);
  PPL_ASSERT(this != &y);

  if (coeff1 == 1) {
    // Optimize for this special case.
    iterator i = end();
    for (const_iterator j = y.begin(), j_end = y.end(); j != j_end; ++j) {
      i = insert(i, j.index());
      add_mul_assign(*i, *j, coeff2);
      if (*i == 0) {
        i = reset(i);
      }
    }
    return;
  }

  dimension_type counter = 0;
  // Count the number of elements that are stored in y but not in *this.
  {
    iterator i = begin();
    iterator i_end = end();
    const_iterator j = y.begin();
    const_iterator j_end = y.end();
    if (i != i_end) {
      while (j != j_end) {
        PPL_ASSERT(i != i_end);
        if (i.index() == j.index()) {
          ++i;
          ++j;
          if (i == i_end) {
            break;
          }
        }
        else {
          if (i.index() < j.index()) {
            i = lower_bound(i, j.index());
            if (i == i_end) {
              break;
            }
          }
          else {
            PPL_ASSERT(i.index() > j.index());
            ++counter;
            ++j;
          }
        }
      }
    }
    PPL_ASSERT(i == i_end || j == j_end);
    for ( ; j != j_end; ++j) {
      ++counter;
    }
  }
  // This condition is arbitrary. Changing it affects performance but not
  // correctness. The values have been tuned using some ppl_lpsol benchmarks
  // on 2 October 2010.
  if (counter == 0 || counter < (7 * size()) / 64) {
    // Few insertions needed, do them directly.
    iterator i = begin();
    // This is a const reference to an internal iterator that is kept valid.
    // If we stored a copy instead, it would be invalidated by the calls to
    // reset() and insert().
    const iterator& i_end = end();
    const_iterator j = y.begin();
    const_iterator j_end = y.end();
    while (i != i_end && j != j_end) {
      if (i.index() == j.index()) {
        (*i) *= coeff1;
        add_mul_assign(*i, *j, coeff2);
        if (*i == 0) {
          i = reset(i);
        }
        else {
          ++i;
        }
        ++j;
      }
      else {
        if (i.index() < j.index()) {
          (*i) *= coeff1;
          ++i;
        }
        else {
          PPL_ASSERT(i.index() > j.index());
          i = insert(i, j.index(), *j);
          (*i) *= coeff2;
          ++i;
          ++j;
        }
      }
    }
    PPL_ASSERT(i == i_end || j == j_end);
    for ( ; i != i_end; ++i) {
      (*i) *= coeff1;
    }
    for ( ; j != j_end; ++j) {
      i = insert(i, j.index(), *j);
      (*i) *= coeff2;
    }
  }
  else {
    // Too many insertions needed, a full copy is probably faster than
    // inserting all those new elements into *this.
    CO_Tree new_tree(sparse_row_linear_combine_helper_iterator(*this, y,
                                                               coeff1,
                                                               coeff2),
                     counter + tree.size());
    tree.m_swap(new_tree);

    // Now remove stored zeroes.
    iterator i = begin();
    // Note that end() cannot be called just once and cached, because
    // reset() invalidates all iterators.
    while (i != end()) {
      if (*i == 0) {
#ifndef NDEBUG
        const dimension_type old_index = i.index();
#endif
        i = reset(i);
        PPL_ASSERT(find(old_index) == end());
      }
      else {
        ++i;
      }
    }
  }
}

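// Same as the linear_combine() above, but only the coefficients with index
// in [start, end) are combined; elements outside that range are left
// untouched. The cases coeff1 == 1, coeff2 == 1 and coeff2 == -1 are
// handled by specialized code paths.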
void
PPL::Sparse_Row::linear_combine(const Sparse_Row& y,
                                Coefficient_traits::const_reference coeff1,
                                Coefficient_traits::const_reference coeff2,
                                dimension_type start, dimension_type end) {
  PPL_ASSERT(coeff1 != 0);
  PPL_ASSERT(coeff2 != 0);
  PPL_ASSERT(this != &y);

  if (coeff1 == 1) {
    if (coeff2 == 1) {
      // Optimized implementation for coeff1==1, coeff2==1.
      iterator i = this->end();
      for (const_iterator j = y.lower_bound(start),
             j_end = y.lower_bound(end); j != j_end; ++j) {
        i = insert(i, j.index());
        *i += *j;
        if (*i == 0) {
          i = reset(i);
        }
      }
      return;
    }
    if (coeff2 == -1) {
      // Optimized implementation for coeff1==1, coeff2==-1.
      iterator i = this->end();
      for (const_iterator j = y.lower_bound(start),
             j_end = y.lower_bound(end); j != j_end; ++j) {
        i = insert(i, j.index());
        *i -= *j;
        if (*i == 0) {
          i = reset(i);
        }
      }
      return;
    }
    // Optimized implementation for coeff1==1.
    iterator i = this->end();
    for (const_iterator j = y.lower_bound(start),
           j_end = y.lower_bound(end); j != j_end; ++j) {
      i = insert(i, j.index());
      add_mul_assign(*i, *j, coeff2);
      if (*i == 0) {
        i = reset(i);
      }
    }
    return;
  }

  if (coeff2 == 1) {
    // Optimized implementation for coeff2==1.
    iterator i = lower_bound(start);
    // This is a const reference to an internal iterator that is kept valid.
    // If we stored a copy instead, it would be invalidated by the calls to
    // reset() and insert().
    const iterator& i_end = this->end();
    const_iterator j = y.lower_bound(start);
    const_iterator j_end = y.lower_bound(end);
    while (i != i_end && i.index() < end && j != j_end) {
      if (i.index() == j.index()) {
        (*i) *= coeff1;
        *i += *j;
        if (*i == 0) {
          i = reset(i);
        }
        else {
          ++i;
        }
        ++j;
      }
      else {
        if (i.index() < j.index()) {
          (*i) *= coeff1;
          ++i;
        }
        else {
          PPL_ASSERT(i.index() > j.index());
          i = insert(i, j.index(), *j);
          ++i;
          ++j;
        }
      }
    }
    PPL_ASSERT(i == i_end || j == j_end);
    for ( ; i != i_end && i.index() < end; ++i) {
      (*i) *= coeff1;
    }
    for ( ; j != j_end; ++j) {
      i = insert(i, j.index(), *j);
    }
    return;
  }

  if (coeff2 == -1) {
    // Optimized implementation for coeff2==-1.
    iterator i = lower_bound(start);
    // This is a const reference to an internal iterator that is kept valid.
    // If we stored a copy instead, it would be invalidated by the calls to
    // reset() and insert().
    const iterator& i_end = this->end();
    const_iterator j = y.lower_bound(start);
    const_iterator j_end = y.lower_bound(end);
    while (i != i_end && i.index() < end && j != j_end) {
      if (i.index() == j.index()) {
        (*i) *= coeff1;
        *i -= *j;
        if (*i == 0) {
          i = reset(i);
        }
        else {
          ++i;
        }
        ++j;
      }
      else {
        if (i.index() < j.index()) {
          (*i) *= coeff1;
          ++i;
        }
        else {
          PPL_ASSERT(i.index() > j.index());
          i = insert(i, j.index(), *j);
          neg_assign(*i);
          ++i;
          ++j;
        }
      }
    }
    PPL_ASSERT(i == i_end || j == j_end);
    for ( ; i != i_end && i.index() < end; ++i) {
      (*i) *= coeff1;
    }
    for ( ; j != j_end; ++j) {
      i = insert(i, j.index(), *j);
      neg_assign(*i);
    }
    return;
  }

  iterator i = lower_bound(start);
  // This is a const reference to an internal iterator that is kept valid.
  // If we stored a copy instead, it would be invalidated by the calls to
  // reset() and insert().
  const iterator& i_end = this->end();
  const_iterator j = y.lower_bound(start);
  const_iterator j_end = y.lower_bound(end);
  while (i != i_end && i.index() < end && j != j_end) {
    if (i.index() == j.index()) {
      (*i) *= coeff1;
      add_mul_assign(*i, *j, coeff2);
      if (*i == 0) {
        i = reset(i);
      }
      else {
        ++i;
      }
      ++j;
    }
    else {
      if (i.index() < j.index()) {
        (*i) *= coeff1;
        ++i;
      }
      else {
        PPL_ASSERT(i.index() > j.index());
        i = insert(i, j.index(), *j);
        (*i) *= coeff2;
        ++i;
        ++j;
      }
    }
  }
  PPL_ASSERT(i == i_end || j == j_end);
  for ( ; i != i_end && i.index() < end; ++i) {
    (*i) *= coeff1;
  }
  for ( ; j != j_end; ++j) {
    i = insert(i, j.index(), *j);
    (*i) *= coeff2;
  }
}

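// Writes a textual representation of the row to s, in the form
//   size <size> elements <n> [ <index> ]= <value> ...
// e.g. a row of size 5 storing 3 at index 1 and -2 at index 4 is dumped as
// "size 5 elements 2 [ 1 ]= 3 [ 4 ]= -2 " followed by a newline.
// ascii_load() below reads back exactly this format.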
void
PPL::Sparse_Row::ascii_dump(std::ostream& s) const {
  s << "size " << size_ << ' ';
  dimension_type n_elements = 0;
  for (const_iterator i = begin(), i_end = end(); i != i_end; ++i) {
    ++n_elements;
  }
  s << "elements " << n_elements << ' ';
  for (const_iterator i = begin(), i_end = end(); i != i_end; ++i) {
    s << "[ " << i.index() << " ]= " << *i << ' ';
  }
  s << "\n";
}

PPL_OUTPUT_DEFINITIONS_ASCII_ONLY(Sparse_Row)

bool
PPL::Sparse_Row::ascii_load(std::istream& s) {
  std::string str;
  if (!(s >> str) || str != "size") {
    return false;
  }
  if (!(s >> size_)) {
    return false;
  }
  clear();

  if (!(s >> str) || str != "elements") {
    return false;
  }

  dimension_type n_elements;
  if (!(s >> n_elements)) {
    return false;
  }

  PPL_DIRTY_TEMP_COEFFICIENT(current_data);
  for (dimension_type i = 0; i < n_elements; ++i) {
    dimension_type current_key;
    if (!(s >> str) || str != "[") {
      return false;
    }
    if (!(s >> current_key)) {
      return false;
    }
    if (!(s >> str) || str != "]=") {
      return false;
    }
    if (!(s >> current_data)) {
      return false;
    }
    tree.insert(current_key, current_data);
  }
  PPL_ASSERT(OK());
  return true;
}

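// Checks the row invariant: every stored index must be smaller than the row
// size.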
bool
PPL::Sparse_Row::OK() const {
  if (begin() == end()) {
    return true;
  }
  const_iterator last = end();
  --last;
  return (last.index() < size_);
}

bool
PPL::Sparse_Row::OK(dimension_type /* capacity */) const {
  return OK();
}

/*! \relates Parma_Polyhedra_Library::Sparse_Row */
bool
PPL::operator==(const Sparse_Row& x, const Sparse_Row& y) {
  if (x.size() != y.size()) {
    return false;
  }
  Sparse_Row::const_iterator i = x.begin();
  Sparse_Row::const_iterator i_end = x.end();
  Sparse_Row::const_iterator j = y.begin();
  Sparse_Row::const_iterator j_end = y.end();
  while (i != i_end && j != j_end) {
    if (i.index() == j.index()) {
      if (*i != *j) {
        return false;
      }
      ++i;
      ++j;
    }
    else {
      if (i.index() < j.index()) {
        if (*i != 0) {
          return false;
        }
        ++i;
      }
      else {
        PPL_ASSERT(i.index() > j.index());
        if (*j != 0) {
          return false;
        }
        ++j;
      }
    }
  }
  for ( ; i != i_end; ++i) {
    if (*i != 0) {
      return false;
    }
  }
  for ( ; j != j_end; ++j) {
    if (*j != 0) {
      return false;
    }
  }
  return true;
}

/*! \relates Parma_Polyhedra_Library::Sparse_Row */
bool
PPL::operator!=(const Sparse_Row& x, const Sparse_Row& y) {
  return !(x == y);
}

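// A Dense_Row and a Sparse_Row are considered equal when they have the same
// size and the same coefficients, with non-stored sparse elements counting
// as zero.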
bool
PPL::operator==(const Dense_Row& x, const Sparse_Row& y) {
  if (x.size() != y.size()) {
    return false;
  }
  Sparse_Row::const_iterator itr = y.end();
  for (dimension_type i = 0; i < x.size(); ++i) {
    itr = y.lower_bound(itr, i);
    if (itr != y.end() && itr.index() == i) {
      if (x[i] != *itr) {
        return false;
      }
    }
    else {
      if (x[i] != 0) {
        return false;
      }
    }
  }
  return true;
}

bool
PPL::operator!=(const Dense_Row& x, const Sparse_Row& y) {
  return !(x == y);
}

bool
PPL::operator==(const Sparse_Row& x, const Dense_Row& y) {
  return y == x;
}

bool
PPL::operator!=(const Sparse_Row& x, const Dense_Row& y) {
  return !(x == y);
}

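// Combines the sparse row x with the dense row y: for each index i of y,
// assigns coeff1 * x[i] + coeff2 * y[i] to x[i]. Coefficients that become
// zero are not stored in x.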
void
PPL::linear_combine(Sparse_Row& x, const Dense_Row& y,
                    Coefficient_traits::const_reference coeff1,
                    Coefficient_traits::const_reference coeff2) {
  PPL_ASSERT(x.size() >= y.size());
  PPL_ASSERT(coeff1 != 0);
  PPL_ASSERT(coeff2 != 0);

  Sparse_Row::iterator itr = x.end();

  for (dimension_type i = 0; i < y.size(); ++i) {
    itr = x.lower_bound(itr, i);
    if (itr == x.end() || itr.index() != i) {
      if (y[i] == 0) {
        continue;
      }
      itr = x.insert(itr, i, y[i]);
      (*itr) *= coeff2;
      PPL_ASSERT((*itr) != 0);
    }
    else {
      PPL_ASSERT(itr.index() == i);
      (*itr) *= coeff1;
      add_mul_assign(*itr, y[i], coeff2);
      if (*itr == 0) {
        itr = x.reset(itr);
      }
    }
  }
}

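// Same as above, but only the coefficients with index in [start, end) are
// combined. The cases coeff1 == 1, coeff2 == 1 and coeff2 == -1 are handled
// by specialized code paths.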
void
PPL::linear_combine(Sparse_Row& x, const Dense_Row& y,
                    Coefficient_traits::const_reference coeff1,
                    Coefficient_traits::const_reference coeff2,
                    dimension_type start, dimension_type end) {
  PPL_ASSERT(coeff1 != 0);
  PPL_ASSERT(coeff2 != 0);
  PPL_ASSERT(start <= end);
  PPL_ASSERT(end <= x.size());
  PPL_ASSERT(end <= y.size());

  Sparse_Row::iterator itr = x.lower_bound(start);

  if (coeff1 == 1) {
    if (coeff2 == 1) {
      for (dimension_type i = start; i < end; ++i) {
        PPL_ASSERT(itr == x.end() || itr.index() + 1 >= i);
        if (itr != x.end() && itr.index() < i) {
          ++itr;
        }
        PPL_ASSERT(itr == x.end() || itr.index() >= i);
        if (itr == x.end() || itr.index() != i) {
          if (y[i] == 0) {
            continue;
          }
          itr = x.insert(itr, i, y[i]);
          PPL_ASSERT((*itr) != 0);
        }
        else {
          PPL_ASSERT(itr.index() == i);
          (*itr) += y[i];
          if (*itr == 0) {
            itr = x.reset(itr);
          }
        }
      }
      return;
    }
    if (coeff2 == -1) {
      for (dimension_type i = start; i < end; ++i) {
        PPL_ASSERT(itr == x.end() || itr.index() + 1 >= i);
        if (itr != x.end() && itr.index() < i) {
          ++itr;
        }
        PPL_ASSERT(itr == x.end() || itr.index() >= i);
        if (itr == x.end() || itr.index() != i) {
          if (y[i] == 0) {
            continue;
          }
          itr = x.insert(itr, i, y[i]);
          neg_assign(*itr);
          PPL_ASSERT((*itr) != 0);
        }
        else {
          PPL_ASSERT(itr.index() == i);
          (*itr) -= y[i];
          if (*itr == 0) {
            itr = x.reset(itr);
          }
        }
      }
      return;
    }
    for (dimension_type i = start; i < end; ++i) {
      PPL_ASSERT(itr == x.end() || itr.index() + 1 >= i);
      if (itr != x.end() && itr.index() < i) {
        ++itr;
      }
      PPL_ASSERT(itr == x.end() || itr.index() >= i);
      if (itr == x.end() || itr.index() != i) {
        if (y[i] == 0) {
          continue;
        }
        itr = x.insert(itr, i, y[i]);
        (*itr) *= coeff2;
        PPL_ASSERT((*itr) != 0);
      }
      else {
        PPL_ASSERT(itr.index() == i);
        add_mul_assign(*itr, y[i], coeff2);
        if (*itr == 0) {
          itr = x.reset(itr);
        }
      }
    }
    return;
  }

  if (coeff2 == 1) {
    for (dimension_type i = start; i < end; ++i) {
      PPL_ASSERT(itr == x.end() || itr.index() + 1 >= i);
      if (itr != x.end() && itr.index() < i) {
        ++itr;
      }
      PPL_ASSERT(itr == x.end() || itr.index() >= i);
      if (itr == x.end() || itr.index() != i) {
        if (y[i] == 0) {
          continue;
        }
        itr = x.insert(itr, i, y[i]);
        PPL_ASSERT((*itr) != 0);
      }
      else {
        PPL_ASSERT(itr.index() == i);
        (*itr) *= coeff1;
        (*itr) += y[i];
        if (*itr == 0) {
          itr = x.reset(itr);
        }
      }
    }
    return;
  }
  if (coeff2 == -1) {
    for (dimension_type i = start; i < end; ++i) {
      PPL_ASSERT(itr == x.end() || itr.index() + 1 >= i);
      if (itr != x.end() && itr.index() < i) {
        ++itr;
      }
      PPL_ASSERT(itr == x.end() || itr.index() >= i);
      if (itr == x.end() || itr.index() != i) {
        if (y[i] == 0) {
          continue;
        }
        itr = x.insert(itr, i, y[i]);
        neg_assign(*itr);
        PPL_ASSERT((*itr) != 0);
      }
      else {
        PPL_ASSERT(itr.index() == i);
        (*itr) *= coeff1;
        (*itr) -= y[i];
        if (*itr == 0) {
          itr = x.reset(itr);
        }
      }
    }
    return;
  }

  for (dimension_type i = start; i < end; ++i) {
    PPL_ASSERT(itr == x.end() || itr.index() + 1 >= i);
    if (itr != x.end() && itr.index() < i) {
      ++itr;
    }
    PPL_ASSERT(itr == x.end() || itr.index() >= i);
    if (itr == x.end() || itr.index() != i) {
      if (y[i] == 0) {
        continue;
      }
      itr = x.insert(itr, i, y[i]);
      (*itr) *= coeff2;
      PPL_ASSERT((*itr) != 0);
    }
    else {
      PPL_ASSERT(itr.index() == i);
      (*itr) *= coeff1;
      add_mul_assign(*itr, y[i], coeff2);
      if (*itr == 0) {
        itr = x.reset(itr);
      }
    }
  }
}

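// Assigns coeff1 * x + coeff2 * y to the dense row x, where y is sparse.
// When coeff1 == 1 only the elements stored in y need to be visited.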
void
PPL::linear_combine(Dense_Row& x, const Sparse_Row& y,
                    Coefficient_traits::const_reference coeff1,
                    Coefficient_traits::const_reference coeff2) {
  PPL_ASSERT(x.size() >= y.size());
  if (coeff1 == 1) {
    for (Sparse_Row::const_iterator i = y.begin(),
           i_end = y.end(); i != i_end; ++i) {
      add_mul_assign(x[i.index()], *i, coeff2);
    }
    return;
  }

  Sparse_Row::const_iterator itr = y.end();

  for (dimension_type i = 0; i < x.size(); ++i) {
    x[i] *= coeff1;

    itr = y.lower_bound(itr, i);

    if (itr == y.end() || itr.index() != i) {
      continue;
    }

    add_mul_assign(x[i], *itr, coeff2);
  }
}

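// Same as above, but only the coefficients with index in [start, end) are
// combined.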
void
PPL::linear_combine(Dense_Row& x, const Sparse_Row& y,
                    Coefficient_traits::const_reference coeff1,
                    Coefficient_traits::const_reference coeff2,
                    dimension_type start, dimension_type end) {
  PPL_ASSERT(x.size() >= y.size());
  PPL_ASSERT(coeff1 != 0);
  PPL_ASSERT(coeff2 != 0);

  Sparse_Row::const_iterator itr = y.lower_bound(start);

  if (coeff1 == 1) {
    Sparse_Row::const_iterator itr_end = y.lower_bound(end);
    if (coeff2 == 1) {
      for ( ; itr != itr_end; ++itr) {
        x[itr.index()] += *itr;
      }
      return;
    }
    if (coeff2 == -1) {
      for ( ; itr != itr_end; ++itr) {
        x[itr.index()] -= *itr;
      }
      return;
    }
    for ( ; itr != itr_end; ++itr) {
      add_mul_assign(x[itr.index()], *itr, coeff2);
    }
    return;
  }

  if (coeff2 == 1) {
    for (dimension_type i = start; i < end; ++i) {
      x[i] *= coeff1;

      PPL_ASSERT(itr == y.end() || itr.index() + 1 >= i);
      if (itr != y.end() && itr.index() < i) {
        ++itr;
      }
      PPL_ASSERT(itr == y.end() || itr.index() >= i);

      if (itr == y.end() || itr.index() != i) {
        continue;
      }

      x[i] += *itr;
    }
    return;
  }
  if (coeff2 == -1) {
    for (dimension_type i = start; i < end; ++i) {
      x[i] *= coeff1;

      PPL_ASSERT(itr == y.end() || itr.index() + 1 >= i);
      if (itr != y.end() && itr.index() < i) {
        ++itr;
      }
      PPL_ASSERT(itr == y.end() || itr.index() >= i);

      if (itr == y.end() || itr.index() != i) {
        continue;
      }

      x[i] -= *itr;
    }
    return;
  }

  for (dimension_type i = start; i < end; ++i) {
    x[i] *= coeff1;

    PPL_ASSERT(itr == y.end() || itr.index() + 1 >= i);
    if (itr != y.end() && itr.index() < i) {
      ++itr;
    }
    PPL_ASSERT(itr == y.end() || itr.index() >= i);

    if (itr == y.end() || itr.index() != i) {
      continue;
    }

    add_mul_assign(x[i], *itr, coeff2);
  }
}

void
PPL::linear_combine(Sparse_Row& x, const Sparse_Row& y,
                    Coefficient_traits::const_reference coeff1,
                    Coefficient_traits::const_reference coeff2) {
  x.linear_combine(y, coeff1, coeff2);
}

void
PPL::linear_combine(Sparse_Row& x, const Sparse_Row& y,
                    Coefficient_traits::const_reference c1,
                    Coefficient_traits::const_reference c2,
                    dimension_type start, dimension_type end) {
  x.linear_combine(y, c1, c2, start, end);
}

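// Exchanges the contents of a sparse row and a dense row: the coefficients
// of x are moved into a new dense row, y is converted into a new sparse
// row, and the converted rows are then swapped into y and x respectively.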
void
PPL::swap(Sparse_Row& x, Dense_Row& y) {
  Dense_Row new_dense(x.size(), x.size());

  for (Sparse_Row::iterator i = x.begin(), i_end = x.end();
       i != i_end; ++i) {
    swap(new_dense[i.index()], *i);
  }

  // NOTE: this copies the coefficients of y, even though they could be
  // stolen. Implementing a stealing-based algorithm would take considerable
  // effort and is probably not worth it.
  Sparse_Row new_sparse(y);

  swap(new_dense, y);
  swap(new_sparse, x);
}

void
PPL::swap(Dense_Row& x, Sparse_Row& y) {
  swap(y, x);
}