1 /*++
2 Copyright (c) 2017 Microsoft Corporation
3 
4 Module Name:
5 
6     <name>
7 
8 Abstract:
9 
10     <abstract>
11 
12 Author:
13 
14     Lev Nachmanson (levnach)
15 
16 Revision History:
17 
18 
19 --*/
20 
21 #include "util/vector.h"
22 #include "math/lp/square_sparse_matrix.h"
23 #include <set>
24 #include <queue>
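// Implementation note (a summary of the invariants the code below maintains): every
// non-zero element is stored twice, once in its row list (m_rows) and once in its
// column list (m_columns[..].m_values), as an indexed_value<T> whose m_other field
// holds the element's offset in the mirror list; add_new_element() and remove_element()
// keep the two offsets consistent. Row and column permutations are applied through
// adjust_row/adjust_column and their inverses, and set_max_in_row() keeps the entry of
// maximal absolute value at offset 0 of every row list so that the pivoting code can
// test candidates against row_chunk[0].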
25 namespace lp {
26 template <typename T, typename X>
27 template <typename M>
void square_sparse_matrix<T, X>::copy_column_from_input(unsigned input_column, const M& A, unsigned j) {
29     vector<indexed_value<T>> & new_column_vector = m_columns[j].m_values;
30     for (auto c : A.column(input_column)) {
31         unsigned col_offset = static_cast<unsigned>(new_column_vector.size());
32         vector<indexed_value<T>> & row_vector = m_rows[c.var()];
33         unsigned row_offset = static_cast<unsigned>(row_vector.size());
34         new_column_vector.push_back(indexed_value<T>(c.coeff(), c.var(), row_offset));
35         row_vector.push_back(indexed_value<T>(c.coeff(), j, col_offset));
36         m_n_of_active_elems++;
37     }
38 }
39 
40 template <typename T, typename X>
41 template <typename M>
void square_sparse_matrix<T, X>::copy_column_from_input_with_possible_zeros(const M& A, unsigned j) {
43     vector<indexed_value<T>> & new_column_vector = m_columns[j].m_values;
44     for (auto c : A.column(j)) {
45         if (is_zero(c.coeff()))
46             continue;
47         unsigned col_offset = static_cast<unsigned>(new_column_vector.size());
48         vector<indexed_value<T>> & row_vector = m_rows[c.var()];
49         unsigned row_offset = static_cast<unsigned>(row_vector.size());
50         new_column_vector.push_back(indexed_value<T>(c.coeff(), c.var(), row_offset));
51         row_vector.push_back(indexed_value<T>(c.coeff(), j, col_offset));
52         m_n_of_active_elems++;
53     }
54 }
55 
56 template <typename T, typename X>
57 template <typename M>
void square_sparse_matrix<T, X>::copy_from_input_on_basis(const M & A, vector<unsigned> & basis) {
59     unsigned m = A.row_count();
60     for (unsigned j = m; j-- > 0;) {
61         copy_column_from_input(basis[j], A, j);
62     }
63 }
64 template <typename T, typename X>
65 template <typename M>
void square_sparse_matrix<T, X>::copy_from_input(const M & A) {
67     unsigned m = A.row_count();
68     for (unsigned j = m; j-- > 0;) {
69         copy_column_from_input_with_possible_zeros(A, j);
70     }
71 }
72 
73 // constructor that copies columns of the basis from A
74 template <typename T, typename X>
75 template <typename M>
square_sparse_matrix<T, X>::square_sparse_matrix(const M &A, vector<unsigned> & basis) :
77     m_n_of_active_elems(0),
78     m_pivot_queue(A.row_count()),
79     m_row_permutation(A.row_count()),
80     m_column_permutation(A.row_count()),
81     m_work_pivot_vector(A.row_count(), -1),
82     m_processed(A.row_count()) {
83     init_row_headers();
84     init_column_headers();
85     copy_from_input_on_basis(A, basis);
86 }
87 
88 template <typename T, typename X>
89 template <typename M>
square_sparse_matrix<T, X>::square_sparse_matrix(const M &A) :
91     m_n_of_active_elems(0),
92     m_pivot_queue(A.row_count()),
93     m_row_permutation(A.row_count()),
94     m_column_permutation(A.row_count()),
95     m_work_pivot_vector(A.row_count(), -1),
96     m_processed(A.row_count()) {
97     init_row_headers();
98     init_column_headers();
99     copy_from_input(A);
100 }
101 
102 
103 template <typename T, typename X>
void square_sparse_matrix<T, X>::set_with_no_adjusting_for_row(unsigned row, unsigned col, T val) { // should not be used in efficient code
105     vector<indexed_value<T>> & row_vec = m_rows[row];
106     for (auto & iv : row_vec) {
107         if (iv.m_index == col) {
108             iv.set_value(val);
109             return;
110         }
111     }
    // the column was not found among the row's entries, so append a new element
113     row_vec.push_back(indexed_value<T>(val, col, -1));
114 }
115 
116 template <typename T, typename X>
void square_sparse_matrix<T, X>::set_with_no_adjusting_for_col(unsigned row, unsigned col, T val) { // should not be used in efficient code
118     vector<indexed_value<T>> & col_vec = m_columns[col].m_values;
119     for (auto & iv : col_vec) {
120         if (iv.m_index == row) {
121             iv.set_value(val);
122             return;
123         }
124     }
    // the row was not found among the column's entries, so append a new element
126     col_vec.push_back(indexed_value<T>(val, row, -1));
127 }
128 
129 
130 template <typename T, typename X>
void square_sparse_matrix<T, X>::set_with_no_adjusting(unsigned row, unsigned col, T val) { // should not be used in efficient code
132     set_with_no_adjusting_for_row(row, col, val);
133     set_with_no_adjusting_for_col(row, col, val);
134 }
135 
136 template <typename T, typename X>
void square_sparse_matrix<T, X>::set(unsigned row, unsigned col, T val) { // should not be used in efficient code
138     lp_assert(row < dimension() && col < dimension());
139     //            m_dense.set_elem(row, col, val);
140     row = adjust_row(row);
141     col = adjust_column(col);
142     set_with_no_adjusting(row, col, val);
143     //            lp_assert(*this == m_dense);
144 }
145 
146 template <typename T, typename X>
T const & square_sparse_matrix<T, X>::get_not_adjusted(unsigned row, unsigned col) const {
148     for (indexed_value<T> const & iv : m_rows[row]) {
149         if (iv.m_index == col) {
150             return iv.m_value;
151         }
152     }
153     return numeric_traits<T>::zero();
154 }
155 
156 template <typename T, typename X>
T const & square_sparse_matrix<T, X>::get(unsigned row, unsigned col) const { // should not be used in efficient code
158     row = adjust_row(row);
159     auto & row_chunk = m_rows[row];
160     col = adjust_column(col);
161     for (indexed_value<T> const & iv : row_chunk) {
162         if (iv.m_index == col) {
163             return iv.m_value;
164         }
165     }
166     return numeric_traits<T>::zero();
167 }
168 
169 // constructor creating a zero matrix of dim*dim
170 template <typename T, typename X>
square_sparse_matrix<T, X>::square_sparse_matrix(unsigned dim, unsigned ) :
172     m_pivot_queue(dim), // dim will be the initial size of the queue
173     m_row_permutation(dim),
174     m_column_permutation(dim),
175     m_work_pivot_vector(dim, -1),
176     m_processed(dim) {
177     init_row_headers();
178     init_column_headers();
179     }
180 
181 template <typename T, typename X>
void square_sparse_matrix<T, X>::init_row_headers() {
183     for (unsigned l = 0; l < m_row_permutation.size(); l++) {
184         m_rows.push_back(vector<indexed_value<T>>());
185     }
186 }
187 
188 template <typename T, typename X>
void square_sparse_matrix<T, X>::init_column_headers() { // the matrix is always square, so there are as many columns as rows
190     for (unsigned l = 0; l < m_row_permutation.size(); l++) {
191         m_columns.push_back(col_header());
192     }
193 }
194 
195 template <typename T, typename X>
unsigned square_sparse_matrix<T, X>::lowest_row_in_column(unsigned j) {
197     auto & mc = get_column_values(adjust_column(j));
198     unsigned ret = 0;
199     for (auto & iv : mc) {
200         unsigned row = adjust_row_inverse(iv.m_index);
201         if (row > ret) {
202             ret = row;
203         }
204     }
205     return ret;
206 }
207 
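// The remove_element overloads below delete an entry in O(1) with the usual
// "swap with the last element and pop_back" trick; the only subtlety is that the
// moved element's mirror entry must have its m_other offset patched. A minimal
// stand-alone sketch of the same idea on a plain vector (illustrative only, not
// part of this class):
//
//     template <typename V>
//     void erase_unordered(std::vector<V> & v, size_t k) {
//         v[k] = v.back(); // overwrite the hole with the tail element
//         v.pop_back();    // shrink; element order is not preserved
//     }
//
// Here the pattern is applied to the row list and the column list simultaneously.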
208 template <typename T, typename X>
void square_sparse_matrix<T, X>::remove_element(vector<indexed_value<T>> & row_vals, unsigned row_offset, vector<indexed_value<T>> & column_vals, unsigned column_offset) {
210     if (column_offset != column_vals.size() - 1) {
211         auto & column_iv = column_vals[column_offset] = column_vals.back(); // copy from the tail
212         column_iv_other(column_iv).m_other = column_offset;
213         if (row_offset != row_vals.size() - 1) {
214             auto & row_iv = row_vals[row_offset] = row_vals.back(); // copy from the tail
215             row_iv_other(row_iv).m_other = row_offset;
216         }
217     } else if (row_offset != row_vals.size() - 1) {
218         auto & row_iv = row_vals[row_offset] = row_vals.back(); // copy from the tail
219         row_iv_other(row_iv).m_other = row_offset;
220     }
221     // do nothing - just decrease the sizes
222     column_vals.pop_back();
223     row_vals.pop_back();
224     m_n_of_active_elems--; // the value is correct only when refactoring
225 }
226 
227 template <typename T, typename X>
void square_sparse_matrix<T, X>::remove_element(vector<indexed_value<T>> & row_chunk, indexed_value<T> & row_el_iv) {
229     auto & column_chunk = get_column_values(row_el_iv.m_index);
230     indexed_value<T> & col_el_iv = column_chunk[row_el_iv.m_other];
231     remove_element(row_chunk, col_el_iv.m_other, column_chunk, row_el_iv.m_other);
232 }
233 
234 template <typename T, typename X>
void square_sparse_matrix<T, X>::put_max_index_to_0(vector<indexed_value<T>> & row_vals, unsigned max_index)  {
236     if (max_index == 0) return;
237     indexed_value<T> * max_iv = & row_vals[max_index];
238     indexed_value<T> * start_iv = & row_vals[0];
239     // update the "other" columns elements which are bound to the start_iv and max_iv
240     m_columns[max_iv->m_index].m_values[max_iv->m_other].m_other = 0;
241     m_columns[start_iv->m_index].m_values[start_iv->m_other].m_other = max_index;
242 
243     // swap the elements
244     indexed_value<T> t = * max_iv;
245     * max_iv = * start_iv;
246     * start_iv = t;
247 }
248 
249 template <typename T, typename X>
void square_sparse_matrix<T, X>::set_max_in_row(vector<indexed_value<T>> & row_vals) {
251     if (row_vals.empty())
252         return;
253     T max_val = abs(row_vals[0].m_value);
254     unsigned max_index = 0;
255     for (unsigned i = 1; i <  row_vals.size(); i++) {
256         T iabs = abs(row_vals[i].m_value);
257         if (iabs > max_val) {
258             max_val = iabs;
259             max_index = i;
260         }
261     }
262     put_max_index_to_0(row_vals, max_index);
263 }
264 
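// pivot_with_eta applies the eta column produced for pivot row i: for every
// off-diagonal eta entry (target row r, coefficient a) it adds a * (row i) to row r
// via pivot_row_to_row, then divides row i by the diagonal element of the eta matrix
// and shrinks the active part of the matrix (shorten_active_matrix).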
265 template <typename T, typename X>
bool square_sparse_matrix<T, X>::pivot_with_eta(unsigned i, eta_matrix<T, X> *eta_matrix, lp_settings & settings) {
267     const T& pivot = eta_matrix->get_diagonal_element();
268     for (auto & it : eta_matrix->m_column_vector.m_data) {
269         if (!pivot_row_to_row(i, it.second, it.first, settings)) {
270             return false;
271         }
272     }
273 
274     divide_row_by_constant(i, pivot, settings);
275     if (!shorten_active_matrix(i, eta_matrix)) {
276         return false;
277     }
278 
279     return true;
280 }
281 
// records, in m_work_pivot_vector, the offset of every element of the row keyed by its
// column index, and removes the pivot column's element from the row
template <typename T, typename X>
void square_sparse_matrix<T, X>::scan_row_to_work_vector_and_remove_pivot_column(unsigned row, unsigned pivot_column) {
285     auto & rvals = m_rows[row];
286     unsigned size = rvals.size();
287     for (unsigned j = 0; j < size; j++) {
288         auto & iv = rvals[j];
289         if (iv.m_index != pivot_column) {
290             m_work_pivot_vector[iv.m_index] = j;
291         } else {
292             remove_element(rvals, iv);
293             j--;
294             size--;
295         }
296     }
297 }
298 
299 #ifdef Z3DEBUG
300 template <typename T, typename X>
vector<T> square_sparse_matrix<T, X>::get_full_row(unsigned i) const {
302     vector<T> r;
303     for (unsigned j = 0; j < column_count(); j++)
304         r.push_back(get(i, j));
305     return r;
306 }
307 #endif
308 
309 
310 
// This method pivots row i into row i0 by multiplying row i by
// alpha and adding it to row i0.
// After pivoting, row i0 has its element of maximal absolute value placed correctly at the start of the row.
// Returns false if the resulting row is all zeroes, and true otherwise.
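// In dense terms the update is simply row[i0] += alpha * row[i] with the pivot column
// dropped; a sketch of that loop under this reading (illustrative only, where
// abs_is_tiny stands for settings.abs_val_is_smaller_than_drop_tolerance):
//
//     for (unsigned j = 0; j < dim; j++) {          // all columns except pivot_col
//         if (j == pivot_col) continue;
//         a[i0][j] += alpha * a[i][j];
//         if (abs_is_tiny(a[i0][j])) a[i0][j] = 0;  // drop-tolerance cleanup
//     }
//
// The sparse version below additionally has to create brand-new fill-in entries,
// delete entries that dropped to (near) zero, and keep the row/column cross links
// and the max-abs-at-offset-0 invariant intact.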
315 template <typename T, typename X>
bool square_sparse_matrix<T, X>::pivot_row_to_row(unsigned i, const T& alpha, unsigned i0, lp_settings & settings ) {
317     lp_assert(i < dimension() && i0 < dimension());
318     lp_assert(i != i0);
319     unsigned pivot_col = adjust_column(i);
320     i = adjust_row(i);
321     i0 = adjust_row(i0);
322     vector<unsigned> became_zeros;
    // record the offsets of the elements of row i0 in m_work_pivot_vector and drop the pivot column entry
324     scan_row_to_work_vector_and_remove_pivot_column(i0, pivot_col);
325     auto & i0_row_vals = m_rows[i0];
326     // run over the pivot row and update row i0
327     unsigned prev_size_i0 = i0_row_vals.size();
328     for (const auto & iv : m_rows[i]) {
329         unsigned j = iv.m_index;
330         if (j == pivot_col) continue;
331         T alv = alpha * iv.m_value;
332         int j_offs = m_work_pivot_vector[j];
333         if (j_offs == -1) { // it is a new element
334             if (!settings.abs_val_is_smaller_than_drop_tolerance(alv)) {
335                 add_new_element(i0, j, alv);
336             }
337         }
338         else {
339             auto & row_el_iv = i0_row_vals[j_offs];
340             row_el_iv.m_value += alv;
341             if (settings.abs_val_is_smaller_than_drop_tolerance(row_el_iv.m_value)) {
342                 became_zeros.push_back(j_offs);
343                 ensure_increasing(became_zeros);
344             }
345             else {
346                 m_columns[j].m_values[row_el_iv.m_other].set_value(row_el_iv.m_value);
347             }
348         }
349     }
350 
351 
352     // clean the work vector
353     for (unsigned k = 0; k < prev_size_i0; k++) {
354         m_work_pivot_vector[i0_row_vals[k].m_index] = -1;
355     }
356 
357     for (unsigned k = became_zeros.size(); k-- > 0; ) {
358         unsigned j = became_zeros[k];
359         remove_element(i0_row_vals, i0_row_vals[j]);
360         if (i0_row_vals.empty())
361             return false;
362     }
363 
364     if (numeric_traits<T>::precise() == false)
365         set_max_in_row(i0_row_vals);
366 
367     return !i0_row_vals.empty();
368 }
369 
370 
371 
372 // set the max val as well
373 // returns false if the resulting row is all zeroes, and true otherwise
374 template <typename T, typename X>
bool square_sparse_matrix<T, X>::set_row_from_work_vector_and_clean_work_vector_not_adjusted(unsigned i0, indexed_vector<T> & work_vec,
                                                                                              lp_settings & settings) {
377     remove_zero_elements_and_set_data_on_existing_elements_not_adjusted(i0, work_vec, settings);
378     // all non-zero elements in m_work_pivot_vector are new
379     for (unsigned j : work_vec.m_index) {
380         if (numeric_traits<T>::is_zero(work_vec[j])) {
381             continue;
382         }
383         lp_assert(!settings.abs_val_is_smaller_than_drop_tolerance(work_vec[j]));
384         add_new_element(i0, adjust_column(j), work_vec[j]);
385         work_vec[j] = numeric_traits<T>::zero();
386     }
387     work_vec.m_index.clear();
388     auto & row_vals = m_rows[i0];
389     if (row_vals.empty()) {
390         return false;
391     }
392     set_max_in_row(row_vals);  // it helps to find larger pivots
393     return true;
394 }
395 
396 
397 
398 template <typename T, typename X>
void square_sparse_matrix<T, X>::remove_zero_elements_and_set_data_on_existing_elements(unsigned row) {
400     auto & row_vals = m_rows[row];
401     for (unsigned k = static_cast<unsigned>(row_vals.size()); k-- > 0;) { // we cannot simply run the iterator since we are removing
402         // elements from row_vals
403         auto & row_el_iv = row_vals[k];
404         unsigned j = row_el_iv.m_index;
405         T & wj = m_work_pivot_vector[j];
406         if (is_zero(wj)) {
407             remove_element(row_vals, row_el_iv);
408         } else {
409             m_columns[j].m_values[row_el_iv.m_other].set_value(wj);
410             row_el_iv.set_value(wj);
411             wj = zero_of_type<T>();
412         }
413     }
414 }
415 
// work_vec here uses column indices that have not been adjusted (permuted)
417 template <typename T, typename X>
void square_sparse_matrix<T, X>::remove_zero_elements_and_set_data_on_existing_elements_not_adjusted(unsigned row, indexed_vector<T> & work_vec, lp_settings & settings) {
419     auto & row_vals = m_rows[row];
420     for (unsigned k = static_cast<unsigned>(row_vals.size()); k-- > 0;) { // we cannot simply run the iterator since we are removing
421         // elements from row_vals
422         auto & row_el_iv = row_vals[k];
423         unsigned j = row_el_iv.m_index;
424         unsigned rj = adjust_column_inverse(j);
425         T val = work_vec[rj];
426         if (settings.abs_val_is_smaller_than_drop_tolerance(val)) {
427             remove_element(row_vals, row_el_iv);
428             lp_assert(numeric_traits<T>::is_zero(val));
429         } else {
430             m_columns[j].m_values[row_el_iv.m_other].set_value(row_el_iv.m_value = val);
431             work_vec[rj] = numeric_traits<T>::zero();
432         }
433     }
434 }
435 
436 
437 // adding delta columns at the end of the matrix
438 template <typename T, typename X>
void square_sparse_matrix<T, X>::add_columns_at_the_end(unsigned delta) {
440     for (unsigned i = 0; i < delta; i++) {
441         col_header col_head;
442         m_columns.push_back(col_head);
443     }
444     m_column_permutation.enlarge(delta);
445 }
446 
447 template <typename T, typename X>
void square_sparse_matrix<T, X>::delete_column(int i) {
449     lp_assert(i < dimension());
450     for (auto cell = m_columns[i].m_head; cell != nullptr;) {
451         auto next_cell = cell->m_down;
452         kill_cell(cell);
453         cell = next_cell;
454     }
455 }
456 
457 template <typename T, typename X>
void square_sparse_matrix<T, X>::divide_row_by_constant(unsigned i, const T & t, lp_settings & settings) {
459     lp_assert(!settings.abs_val_is_smaller_than_zero_tolerance(t));
460     i = adjust_row(i);
461     for (auto & iv : m_rows[i]) {
462         T &v = iv.m_value;
463         v /= t;
464         if (settings.abs_val_is_smaller_than_drop_tolerance(v)){
465             v = numeric_traits<T>::zero();
466         }
467         m_columns[iv.m_index].m_values[iv.m_other].set_value(v);
468     }
469 }
470 
471 
472 // solving x * this = y, and putting the answer into y
473 // the matrix here has to be upper triangular
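// In dense terms this is forward substitution over the rows of the unit upper
// triangular U: once y[i] is final (i = 0, 1, ...), subtract y[i] * U(i, j) from y[j]
// for every off-diagonal entry j of row i; the loop below does exactly that, visiting
// only the stored non-zeroes of each row.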
474 template <typename T, typename X>
void square_sparse_matrix<T, X>::solve_y_U(vector<T> & y) const { // works by rows
476 #ifdef Z3DEBUG
477     // T * rs = clone_vector<T>(y, dimension());
478 #endif
479     unsigned end = dimension();
480     for (unsigned i = 0; i + 1 < end; i++) {
        // y[i] is already final at this point
482         const T & yv = y[i];
483         if (numeric_traits<T>::is_zero(yv)) continue;
484         auto & mc = get_row_values(adjust_row(i));
485         for (auto & c : mc) {
486             unsigned col = adjust_column_inverse(c.m_index);
487             if (col != i) {
488                 y[col] -= c.m_value * yv;
489             }
490         }
491     }
492 #ifdef Z3DEBUG
493     // dense_matrix<T> deb(*this);
494     // T * clone_y = clone_vector<T>(y, dimension());
495     // deb.apply_from_right(clone_y);
496     // lp_assert(vectors_are_equal(rs, clone_y, dimension()));
497     // delete [] clone_y;
498     // delete [] rs;
499 #endif
500 }
501 
502 // solving x * this = y, and putting the answer into y
503 // the matrix here has to be upper triangular
504 template <typename T, typename X>
void square_sparse_matrix<T, X>::solve_y_U_indexed(indexed_vector<T> & y, const lp_settings & settings) {
506 #if 0 && Z3DEBUG
507     vector<T> ycopy(y.m_data);
508     if (numeric_traits<T>::precise() == false)
509         solve_y_U(ycopy);
510 #endif
511     vector<unsigned> sorted_active_columns;
512     extend_and_sort_active_rows(y.m_index, sorted_active_columns);
513     for (unsigned k = sorted_active_columns.size(); k-- > 0; ) {
514         unsigned j = sorted_active_columns[k];
515         auto & yj = y[j];
516         for (auto & c: m_columns[adjust_column(j)].m_values) {
517             unsigned i = adjust_row_inverse(c.m_index);
518             if (i == j) continue;
519             yj -= y[i] * c.m_value;
520         }
521     }
522     y.m_index.clear();
523     for (auto j : sorted_active_columns) {
524         if (!settings.abs_val_is_smaller_than_drop_tolerance(y[j]))
525             y.m_index.push_back(j);
526         else if (!numeric_traits<T>::precise())
527             y.m_data[j] = zero_of_type<T>();
528     }
529 
530     lp_assert(y.is_OK());
531 #if 0 && Z3DEBUG
532     if (numeric_traits<T>::precise() == false)
533         lp_assert(vectors_are_equal(ycopy, y.m_data));
534 #endif
535 }
536 
537 
// fills the indices i such that y[i] can be non-zero, and
// sorts them so that the smaller indices come first
540 //    void fill_reachable_indices(std::set<unsigned> & rset, T *y) {
541 // std::queue<unsigned> q;
542 // int m = dimension();
543 // for (int i = m - 1; i >= 0; i--) {
544 //     if (!numeric_traits<T>::is_zero(y[i])){
545 //         for (cell * c = m_columns[adjust_column(i)].m_head; c != nullptr; c = c->m_down) {
546 //             unsigned row = adjust_row_inverse(c->m_i);
547 //             q.push(row);
548 //         }
549 //     }
550 // }
551 // while (!q.empty()) {
552 //     unsigned i = q.front();
553 //     q.pop();
554 //     for (cell * c = m_columns[adjust_column(i)].m_head; c != nullptr; c = c->m_down) {
555 //         unsigned row = adjust_row_inverse(c->m_i);
556 //         if (rset.find(row) == rset.end()){
557 //             rset.insert(row);
558 //             q.push(row);
559 //         }
560 //     }
561 //     }
562 // }
563 
564 template <typename T, typename X>
565 template <typename L>
void square_sparse_matrix<T, X>::find_error_in_solution_U_y(vector<L>& y_orig, vector<L> & y) {
567     unsigned i = dimension();
568     while (i--) {
569         y_orig[i] -= dot_product_with_row(i, y);
570     }
571 }
572 
573 template <typename T, typename X>
574 template <typename L>
void square_sparse_matrix<T, X>::find_error_in_solution_U_y_indexed(indexed_vector<L>& y_orig, indexed_vector<L> & y,  const vector<unsigned>& sorted_active_rows) {
576     for (unsigned i: sorted_active_rows)
577         y_orig.add_value_at_index(i, -dot_product_with_row(i, y)); // cannot round up here!!!
578     // y_orig can contain very small values
579 }
580 
581 
582 template <typename T, typename X>
583 template <typename L>
void square_sparse_matrix<T, X>::add_delta_to_solution(const vector<L>& del, vector<L> & y) {
585     unsigned i = dimension();
586     while (i--) {
587         y[i] += del[i];
588     }
589 }
590 template <typename T, typename X>
591 template <typename L>
void square_sparse_matrix<T, X>::add_delta_to_solution(const indexed_vector<L>& del, indexed_vector<L> & y) {
593 //    lp_assert(del.is_OK());
594  //   lp_assert(y.is_OK());
595     for (auto i : del.m_index) {
596         y.add_value_at_index(i, del[i]);
597     }
598 }
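// The double_solve_U_y overloads below perform one step of iterative refinement:
// solve U * x = y, compute the residual r = y_orig - U * x (find_error_in_solution_U_y*),
// solve U * d = r, and return x + d. The indexed overload falls back to the dense
// solve when the residual carries too many non-zeroes for the indexed representation
// to pay off.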
599 template <typename T, typename X>
600 template <typename L>
void square_sparse_matrix<T, X>::double_solve_U_y(indexed_vector<L>& y, const lp_settings & settings){
602     lp_assert(y.is_OK());
603     indexed_vector<L> y_orig(y); // copy y aside
604     vector<unsigned> active_rows;
605     solve_U_y_indexed_only(y, settings, active_rows);
606     lp_assert(y.is_OK());
607     find_error_in_solution_U_y_indexed(y_orig, y, active_rows);
608     // y_orig contains the error now
609     if (y_orig.m_index.size() * ratio_of_index_size_to_all_size<T>() < 32 * dimension()) {
610         active_rows.clear();
611         solve_U_y_indexed_only(y_orig, settings, active_rows);
612         add_delta_to_solution(y_orig, y);
613         y.clean_up();
614     } else { // the dense version
615         solve_U_y(y_orig.m_data);
616         add_delta_to_solution(y_orig.m_data, y.m_data);
617         y.restore_index_and_clean_from_data();
618     }
619     lp_assert(y.is_OK());
620 }
621 template <typename T, typename X>
622 template <typename L>
void square_sparse_matrix<T, X>::double_solve_U_y(vector<L>& y){
624     vector<L> y_orig(y); // copy y aside
625     solve_U_y(y);
626     find_error_in_solution_U_y(y_orig, y);
627     // y_orig contains the error now
628     solve_U_y(y_orig);
629     add_delta_to_solution(y_orig, y);
630 }
631 
632 // solving this * x = y, and putting the answer into y
633 // the matrix here has to be upper triangular
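// In dense terms this is backward substitution over the columns of the unit upper
// triangular U: once y[j] is final (j = n-1, n-2, ...), subtract y[j] * U(i, j) from
// y[i] for every off-diagonal entry i of column j; only the stored non-zeroes of each
// column are visited.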
634 template <typename T, typename X>
635 template <typename L>
void square_sparse_matrix<T, X>::solve_U_y(vector<L> & y) { // it is a column wise version
637 #ifdef Z3DEBUG
638     // T * rs = clone_vector<T>(y, dimension());
639 #endif
640 
641     for (unsigned j = dimension(); j--; ) {
642         const L & yj = y[j];
643         if (is_zero(yj)) continue;
644         for (const auto & iv : m_columns[adjust_column(j)].m_values) {
645             unsigned i = adjust_row_inverse(iv.m_index);
646             if (i != j) {
647                 y[i] -= iv.m_value * yj;
648             }
649         }
650     }
651 #ifdef Z3DEBUG
652     // dense_matrix<T> deb(*this);
653     // T * clone_y = clone_vector<T>(y, dimension());
654     // deb.apply_from_left(clone_y);
655     // lp_assert(vectors_are_equal(rs, clone_y, dimension()));
656 #endif
657 }
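// The helpers below are the symbolic part of the indexed triangular solves (in the
// spirit of the Gilbert-Peierls sparse solve): starting from the indices of the
// non-zeroes of the right-hand side, a depth-first search over the non-zero structure
// of the matrix collects exactly the positions that can become non-zero, and the
// post-order in which they are pushed into sorted_active_rows is a topological order,
// so solve_U_y_indexed_only and solve_y_U_indexed can process them back to front.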
658 template <typename T, typename X>
void square_sparse_matrix<T, X>::process_index_recursively_for_y_U(unsigned j, vector<unsigned> & sorted_active_rows) {
660     lp_assert(m_processed[j] == false);
661     m_processed[j]=true;
662     auto & row = m_rows[adjust_row(j)];
663     for (auto & c : row) {
664         unsigned i = adjust_column_inverse(c.m_index);
665         if (i == j) continue;
666         if (!m_processed[i]) {
667             process_index_recursively_for_y_U(i, sorted_active_rows);
668         }
669     }
670     sorted_active_rows.push_back(j);
671 }
672 
673 template <typename T, typename X>
void square_sparse_matrix<T, X>::process_column_recursively(unsigned j, vector<unsigned> & sorted_active_rows) {
675     lp_assert(m_processed[j] == false);
676     auto & mc = m_columns[adjust_column(j)].m_values;
677     for (auto & iv : mc) {
678         unsigned i = adjust_row_inverse(iv.m_index);
679         if (i == j) continue;
680         if (!m_processed[i]) {
681             process_column_recursively(i, sorted_active_rows);
682         }
683     }
684     m_processed[j]=true;
685     sorted_active_rows.push_back(j);
686 }
687 
688 
689 template <typename T, typename X>
void square_sparse_matrix<T, X>::create_graph_G(const vector<unsigned> & index_or_right_side, vector<unsigned> & sorted_active_rows) {
691     for (auto i : index_or_right_side) {
692         if (m_processed[i]) continue;
693         process_column_recursively(i, sorted_active_rows);
694     }
695 
696     for (auto i : sorted_active_rows) {
697         m_processed[i] = false;
698     }
699 }
700 
701 
702 template <typename T, typename X>
void square_sparse_matrix<T, X>::extend_and_sort_active_rows(const vector<unsigned> & index_or_right_side, vector<unsigned> & sorted_active_rows) {
704     for (auto i : index_or_right_side) {
705         if (m_processed[i]) continue;
706         process_index_recursively_for_y_U(i, sorted_active_rows);
707     }
708 
709     for (auto i : sorted_active_rows) {
710         m_processed[i] = false;
711     }
712 }
713 
714 
715 template <typename T, typename X>
716 template <typename L>
void square_sparse_matrix<T, X>::solve_U_y_indexed_only(indexed_vector<L> & y, const lp_settings & settings, vector<unsigned> & sorted_active_rows) { // it is a column wise version
718     create_graph_G(y.m_index, sorted_active_rows);
719 
720     for (auto k = sorted_active_rows.size(); k-- > 0;) {
721         unsigned j = sorted_active_rows[k];
722         const L & yj = y[j];
723         if (is_zero(yj)) continue;
724         auto & mc = m_columns[adjust_column(j)].m_values;
725         for (auto & iv : mc) {
726             unsigned i = adjust_row_inverse(iv.m_index);
727             if (i != j) {
728                 y[i] -= iv.m_value * yj;
729             }
730         }
731     }
732     y.m_index.clear();
733     for (auto j : sorted_active_rows) {
734         if (!settings.abs_val_is_smaller_than_drop_tolerance(y[j]))
735             y.m_index.push_back(j);
736         else if (!numeric_traits<L>::precise())
737             y[j] = zero_of_type<L>();
738     }
739 
740     lp_assert(y.is_OK());
741 #ifdef Z3DEBUG
742      // dense_matrix<T,X> deb(this);
743      // vector<T> clone_y(y.m_data);
744      // deb.apply_from_left(clone_y);
745      // lp_assert(vectors_are_equal(rs, clone_y));
746 #endif
747 }
748 
749 template <typename T, typename X>
750 template <typename L>
L square_sparse_matrix<T, X>::dot_product_with_row (unsigned row, const vector<L> & y) const {
752     L ret = zero_of_type<L>();
753     auto & mc = get_row_values(adjust_row(row));
754     for (auto & c : mc) {
755         unsigned col = m_column_permutation[c.m_index];
756         ret += c.m_value * y[col];
757     }
758     return ret;
759 }
760 
761 template <typename T, typename X>
762 template <typename L>
L square_sparse_matrix<T, X>::dot_product_with_row (unsigned row, const indexed_vector<L> & y) const {
764     L ret = zero_of_type<L>();
765     auto & mc = get_row_values(adjust_row(row));
766     for (auto & c : mc) {
767         unsigned col = m_column_permutation[c.m_index];
768         ret += c.m_value * y[col];
769     }
770     return ret;
771 }
772 
773 
774 template <typename T, typename X>
unsigned square_sparse_matrix<T, X>::get_number_of_nonzeroes() const {
776     unsigned ret = 0;
777     for (unsigned i = dimension(); i--; ) {
778         ret += number_of_non_zeroes_in_row(i);
779     }
780     return ret;
781 }
782 
783 template <typename T, typename X>
bool square_sparse_matrix<T, X>::get_non_zero_column_in_row(unsigned i, unsigned *j) const {
785     // go over the i-th row
786     auto & mc = get_row_values(adjust_row(i));
787     if (mc.size() > 0) {
788         *j = m_column_permutation[mc[0].m_index];
789         return true;
790     }
791     return false;
792 }
793 
794 template <typename T, typename X>
void square_sparse_matrix<T, X>::remove_element_that_is_not_in_w(vector<indexed_value<T>> & column_vals, indexed_value<T> & col_el_iv) {
796     auto & row_chunk = m_rows[col_el_iv.m_index];
797     indexed_value<T> & row_el_iv = row_chunk[col_el_iv.m_other];
798     unsigned index_in_row = col_el_iv.m_other;
799     remove_element(row_chunk, col_el_iv.m_other, column_vals, row_el_iv.m_other);
800     if (index_in_row == 0)
801         set_max_in_row(row_chunk);
802 }
803 
804 
805 // w contains the new column
// the old column inside the matrix has not been changed yet
807 template <typename T, typename X>
void square_sparse_matrix<T, X>::remove_elements_that_are_not_in_w_and_update_common_elements(unsigned column_to_replace, indexed_vector<T> & w) {
809     // --------------------------------
810     // column_vals represents the old column
811     auto & column_vals = m_columns[column_to_replace].m_values;
812     for (unsigned k = static_cast<unsigned>(column_vals.size()); k-- > 0;) {
813         indexed_value<T> & col_el_iv = column_vals[k];
814         unsigned i = col_el_iv.m_index;
815         T &w_data_at_i = w[adjust_row_inverse(i)];
816         if (numeric_traits<T>::is_zero(w_data_at_i)) {
817             remove_element_that_is_not_in_w(column_vals, col_el_iv);
818         } else {
819             auto& row_chunk = m_rows[i];
820             unsigned index_in_row = col_el_iv.m_other;
821             if (index_in_row == 0) {
822                 bool look_for_max = abs(w_data_at_i) < abs(row_chunk[0].m_value);
823                 row_chunk[0].set_value(col_el_iv.m_value = w_data_at_i);
824                 if (look_for_max)
825                     set_max_in_row(i);
826             } else {
827                 row_chunk[index_in_row].set_value(col_el_iv.m_value = w_data_at_i);
828                 if (abs(w_data_at_i) > abs(row_chunk[0].m_value))
829                     put_max_index_to_0(row_chunk, index_in_row);
830             }
831             w_data_at_i = numeric_traits<T>::zero();
832         }
833     }
834 }
835 
836 template <typename T, typename X>
void square_sparse_matrix<T, X>::add_new_element(unsigned row, unsigned col, const T& val) {
838     auto & row_vals = m_rows[row];
839     auto & col_vals = m_columns[col].m_values;
840     unsigned row_el_offs = static_cast<unsigned>(row_vals.size());
841     unsigned col_el_offs = static_cast<unsigned>(col_vals.size());
842     row_vals.push_back(indexed_value<T>(val, col, col_el_offs));
843     col_vals.push_back(indexed_value<T>(val, row, row_el_offs));
844     m_n_of_active_elems++;
845 }
846 
// w contains the "rest" of the new column; all elements common to w and the old column have been zeroed already
// the old column inside the matrix has not been changed yet
849 template <typename T, typename X>
void square_sparse_matrix<T, X>::add_new_elements_of_w_and_clear_w(unsigned column_to_replace, indexed_vector<T> & w, lp_settings & settings) {
851     for (unsigned i : w.m_index) {
852         T w_at_i = w[i];
853         if (numeric_traits<T>::is_zero(w_at_i)) continue; // was dealt with already
854         if (!settings.abs_val_is_smaller_than_drop_tolerance(w_at_i)) {
855             unsigned ai = adjust_row(i);
856             add_new_element(ai, column_to_replace, w_at_i);
857             auto & row_chunk = m_rows[ai];
858             lp_assert(row_chunk.size() > 0);
859             if (abs(w_at_i) > abs(row_chunk[0].m_value))
860                 put_max_index_to_0(row_chunk, static_cast<unsigned>(row_chunk.size()) - 1);
861         }
862         w[i] = numeric_traits<T>::zero();
863     }
864     w.m_index.clear();
865 }
866 
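// replace_column swaps a single column of the matrix for the sparse vector w
// (typically when a basis column is replaced during an LU update): entries of the
// old column that are absent from w are removed, entries present in both are
// overwritten in place, and the remaining non-zeroes of w are appended as new
// elements, all while keeping the per-row max-abs element at offset 0.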
867 template <typename T, typename X>
void square_sparse_matrix<T, X>::replace_column(unsigned column_to_replace, indexed_vector<T> & w, lp_settings &settings) {
869     column_to_replace = adjust_column(column_to_replace);
870     remove_elements_that_are_not_in_w_and_update_common_elements(column_to_replace, w);
871     add_new_elements_of_w_and_clear_w(column_to_replace, w, settings);
872 }
873 
874 template <typename T, typename X>
unsigned square_sparse_matrix<T, X>::pivot_score(unsigned i, unsigned j) {
    // (rnz - 1) * (cnz - 1) is the Markowitz number, that is, the maximum number of
    // new non-zeroes we can obtain after the pivoting.
    // In addition we get another cnz - 1 elements in the eta matrix created for this pivot,
    // which gives rnz * (cnz - 1). For example, the score is 0 for a column singleton, but not for
    // a row singleton (which is not also a column singleton).
881 
882     auto col_header = m_columns[j];
883 
884     return static_cast<unsigned>(get_row_values(i).size() * (col_header.m_values.size() - col_header.m_shortened_markovitz - 1));
885 }
886 
887 template <typename T, typename X>
void square_sparse_matrix<T, X>::enqueue_domain_into_pivot_queue() {
889     lp_assert(m_pivot_queue.size() == 0);
890     for (unsigned i = 0; i < dimension(); i++) {
891         auto & rh = m_rows[i];
892         unsigned rnz = static_cast<unsigned>(rh.size());
893         for (auto iv : rh) {
894             unsigned j = iv.m_index;
895             m_pivot_queue.enqueue(i, j, rnz * (static_cast<unsigned>(m_columns[j].m_values.size()) - 1));
896         }
897     }
898 }
899 
900 template <typename T, typename X>
void square_sparse_matrix<T, X>::set_max_in_rows() {
902     unsigned i = dimension();
903     while (i--)
904         set_max_in_row(i);
905 }
906 
907 
908 template <typename T, typename X>
void square_sparse_matrix<T, X>::zero_shortened_markovitz_numbers() {
910     for (auto & ch : m_columns)
911         ch.zero_shortened_markovitz();
912 }
913 
914 template <typename T, typename X>
void square_sparse_matrix<T, X>::prepare_for_factorization() {
916     zero_shortened_markovitz_numbers();
917     set_max_in_rows();
918     enqueue_domain_into_pivot_queue();
919 }
920 
921 template <typename T, typename X>
void square_sparse_matrix<T, X>::recover_pivot_queue(vector<upair> & rejected_pivots)  {
923     for (auto p : rejected_pivots) {
924         m_pivot_queue.enqueue(p.first, p.second, pivot_score(p.first, p.second));
925     }
926 }
927 
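// elem_is_too_small implements the threshold (partial) pivoting test: a candidate
// a(i, j) is acceptable only if c_partial_pivoting * |a(i, j)| >= max_k |a(i, k)|,
// where the row maximum sits at offset 0 of the row list. Return codes:
// 0 - acceptable, 1 - too small, 2 - the element is no longer present in the row.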
928 template <typename T, typename X>
int square_sparse_matrix<T, X>::elem_is_too_small(unsigned i, unsigned j, int c_partial_pivoting)  {
930     auto & row_chunk = m_rows[i];
931 
932     if (j == row_chunk[0].m_index) {
933         return 0; // the max element is at the head
934     }
935     T max = abs(row_chunk[0].m_value);
936     for (unsigned k = 1; k < row_chunk.size(); k++) {
937         auto &iv = row_chunk[k];
938         if (iv.m_index == j)
939             return abs(iv.m_value) * c_partial_pivoting < max ? 1: 0;
940     }
941     return 2;  // the element became zero but it still sits in the active pivots?
942 }
943 
944 template <typename T, typename X>
bool square_sparse_matrix<T, X>::remove_row_from_active_pivots_and_shorten_columns(unsigned row) {
946     unsigned arow = adjust_row(row);
947     for (auto & iv : m_rows[arow]) {
948         m_pivot_queue.remove(arow, iv.m_index);
949         m_n_of_active_elems--;  // the value is correct only when refactoring
950         if (adjust_column_inverse(iv.m_index) <= row)
951             continue; // this column will be removed anyway
952         auto & col = m_columns[iv.m_index];
953 
954         col.shorten_markovich_by_one();
955         if (col.m_values.size() <= col.m_shortened_markovitz)
956             return false; // got a zero column
957     }
958     return true;
959 }
960 
961 template <typename T, typename X>
void square_sparse_matrix<T, X>::remove_pivot_column(unsigned row) {
963     unsigned acol = adjust_column(row);
964     for (const auto & iv : m_columns[acol].m_values)
965         if (adjust_row_inverse(iv.m_index) >= row)
966             m_pivot_queue.remove(iv.m_index, acol);
967 }
968 
969 template <typename T, typename X>
void square_sparse_matrix<T, X>::update_active_pivots(unsigned row) {
971     unsigned arow = adjust_row(row);
972     for (const auto & iv : m_rows[arow]) {
973         col_header & ch = m_columns[iv.m_index];
974         int cols = static_cast<int>(ch.m_values.size()) - ch.m_shortened_markovitz - 1;
975         lp_assert(cols >= 0);
976         for (const auto &ivc : ch.m_values) {
977             unsigned i = ivc.m_index;
            if (adjust_row_inverse(i) <= row) continue; // row i is not active
979             m_pivot_queue.enqueue(i, iv.m_index, static_cast<unsigned>(m_rows[i].size())*cols);
980         }
981     }
982 }
983 
984 template <typename T, typename X>
bool square_sparse_matrix<T, X>::shorten_active_matrix(unsigned row, eta_matrix<T, X> *eta_matrix) {
986     if (!remove_row_from_active_pivots_and_shorten_columns(row))
987         return false;
988     remove_pivot_column(row);
989     // need to know the max priority of the queue here
990     update_active_pivots(row);
991     if (eta_matrix == nullptr) return true;
992     // it looks like double work, but the pivot scores have changed for all rows
993     // touched by eta_matrix
994     for (auto & it : eta_matrix->m_column_vector.m_data) {
995         unsigned row = adjust_row(it.first);
996         const auto & row_values = m_rows[row];
997         unsigned rnz = static_cast<unsigned>(row_values.size());
998         for (auto & iv : row_values) {
999             const col_header& ch = m_columns[iv.m_index];
1000             int cnz = static_cast<int>(ch.m_values.size()) - ch.m_shortened_markovitz - 1;
1001             lp_assert(cnz >= 0);
1002             m_pivot_queue.enqueue(row, iv.m_index, rnz * cnz);
1003         }
1004     }
1005 
1006     return true;
1007 }
1008 
1009 template <typename T, typename X>
unsigned square_sparse_matrix<T, X>::pivot_score_without_shortened_counters(unsigned i, unsigned j, unsigned k) {
1011     auto &cols = m_columns[j].m_values;
1012     unsigned cnz = cols.size();
1013     for (auto & iv : cols) {
1014         if (adjust_row_inverse(iv.m_index) < k)
1015             cnz--;
1016     }
1017     lp_assert(cnz > 0);
1018     return m_rows[i].m_values.size() * (cnz - 1);
1019 }
1020 #ifdef Z3DEBUG
1021 template <typename T, typename X>
bool square_sparse_matrix<T, X>::can_improve_score_for_row(unsigned row, unsigned score, T const & c_partial_pivoting, unsigned k) {
1023     unsigned arow = adjust_row(row);
1024     auto & row_vals = m_rows[arow].m_values;
1025     auto & begin_iv = row_vals[0];
1026     T row_max = abs(begin_iv.m_value);
1027     lp_assert(adjust_column_inverse(begin_iv.m_index) >= k);
1028     if (pivot_score_without_shortened_counters(arow, begin_iv.m_index, k) < score) {
1029         print_active_matrix(k);
1030         return true;
1031     }
1032     for (unsigned jj = 1; jj < row_vals.size(); jj++) {
1033         auto & iv = row_vals[jj];
1034         lp_assert(adjust_column_inverse(iv.m_index) >= k);
1035         lp_assert(abs(iv.m_value) <= row_max);
1036         if (c_partial_pivoting * abs(iv.m_value) < row_max) continue;
1037         if (pivot_score_without_shortened_counters(arow, iv.m_index, k) < score) {
1038             print_active_matrix(k);
1039             return true;
1040         }
1041     }
1042     return false;
1043 }
1044 
1045 template <typename T, typename X>
bool square_sparse_matrix<T, X>::really_best_pivot(unsigned i, unsigned j, T const & c_partial_pivoting, unsigned k) {
1047     unsigned queue_pivot_score = pivot_score_without_shortened_counters(i, j, k);
1048     for (unsigned ii = k; ii < dimension(); ii++) {
1049         lp_assert(!can_improve_score_for_row(ii, queue_pivot_score, c_partial_pivoting, k));
1050     }
1051     return true;
1052 }
1053 template <typename T, typename X>
void square_sparse_matrix<T, X>::print_active_matrix(unsigned k, std::ostream & out) {
1055     out << "active matrix for k = " << k << std::endl;
1056     if (k >= dimension()) {
1057         out << "empty" << std::endl;
1058         return;
1059     }
1060     unsigned dim = dimension() - k;
1061     dense_matrix<T, X> b(dim, dim);
1062     for (unsigned i = 0; i < dim; i++)
1063         for (unsigned j = 0; j < dim; j++ )
1064             b.set_elem(i, j, zero_of_type<T>());
1065     for (int i = k; i < dimension(); i++) {
1066         unsigned col = adjust_column(i);
1067         for (auto &iv : get_column_values(col)) {
1068             unsigned row = iv.m_index;
1069             unsigned row_ex = this->adjust_row_inverse(row);
1070             if (row_ex < k) continue;
1071             auto v = this->get_not_adjusted(row, col);
1072             b.set_elem(row_ex - k, i -k, v);
1073         }
1074     }
1075     print_matrix(b, out);
1076 }
1077 
1078 template <typename T, typename X>
bool square_sparse_matrix<T, X>::pivot_queue_is_correct_for_row(unsigned i, unsigned k) {
1080     unsigned arow = adjust_row(i);
1081     for (auto & iv : m_rows[arow].m_values) {
1082         lp_assert(pivot_score_without_shortened_counters(arow, iv.m_index, k + 1) ==
1083                     m_pivot_queue.get_priority(arow, iv.m_index));
1084     }
1085     return true;
1086 }
1087 
1088 template <typename T, typename X>
bool square_sparse_matrix<T, X>::pivot_queue_is_correct_after_pivoting(int k) {
1090     for (unsigned i = k + 1; i < dimension(); i++ )
1091         lp_assert(pivot_queue_is_correct_for_row(i, k));
1092     lp_assert(m_pivot_queue.is_correct());
1093     return true;
1094 }
1095 #endif
1096 
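// get_pivot_for_column pops candidates from the Markowitz-style priority queue until
// it finds one that is still inside the active submatrix (adjusted indices >= k) and
// passes the threshold test above; candidates that are merely too small are collected
// and re-enqueued via recover_pivot_queue so they remain available later. On success,
// i and j are returned as unadjusted (external) indices.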
1097 template <typename T, typename X>
bool square_sparse_matrix<T, X>::get_pivot_for_column(unsigned &i, unsigned &j, int c_partial_pivoting, unsigned k) {
1099     vector<upair> pivots_candidates_that_are_too_small;
1100     while (!m_pivot_queue.is_empty()) {
1101         m_pivot_queue.dequeue(i, j);
1102         unsigned i_inv = adjust_row_inverse(i);
1103         if (i_inv < k) continue;
1104         unsigned j_inv = adjust_column_inverse(j);
1105         if (j_inv < k) continue;
1106         int _small = elem_is_too_small(i, j, c_partial_pivoting);
1107         if (!_small) {
1108 #ifdef Z3DEBUG
1109             // if (!really_best_pivot(i, j, c_partial_pivoting, k)) {
1110             //     print_active_matrix(k);
1111             //     lp_assert(false);
1112             //  }
1113 #endif
1114             recover_pivot_queue(pivots_candidates_that_are_too_small);
1115             i = i_inv;
1116             j = j_inv;
1117             return true;
1118         }
1119         if (_small != 2) { // 2 means that the pair is not in the matrix
1120             pivots_candidates_that_are_too_small.push_back(std::make_pair(i, j));
1121         }
1122     }
1123     recover_pivot_queue(pivots_candidates_that_are_too_small);
1124     return false;
1125 }
1126 
1127 template <typename T, typename X>
bool square_sparse_matrix<T, X>::elem_is_too_small(vector<indexed_value<T>> & row_chunk, indexed_value<T> & iv, int c_partial_pivoting) {
1129     if (&iv == &row_chunk[0]) {
1130         return false; // the max element is at the head
1131     }
1132     T val = abs(iv.m_value);
1133     T max = abs(row_chunk[0].m_value);
1134     return val * c_partial_pivoting < max;
1135 }
1136 
1137 template <typename T, typename X>
bool square_sparse_matrix<T, X>::shorten_columns_by_pivot_row(unsigned i, unsigned pivot_column) {
1139     vector<indexed_value<T>> & row_chunk = get_row_values(i);
1140 
1141     for (indexed_value<T> & iv : row_chunk) {
1142         unsigned j = iv.m_index;
1143         if (j == pivot_column) {
1144             lp_assert(!col_is_active(j));
1145             continue;
1146         }
1147         m_columns[j].shorten_markovich_by_one();
1148 
1149         if (m_columns[j].m_shortened_markovitz >= get_column_values(j).size()) { // got the zero column under the row!
1150             return false;
1151         }
1152     }
1153     return true;
1154 }
1155 
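// fill_eta_matrix builds the eta matrix for pivot column j from the entries of that
// column at or below the diagonal: if that part of the column is already a unit
// column, nothing is created (*eta stays nullptr); otherwise the sub-diagonal entries
// are stored negated, the diagonal entry becomes the eta's diagonal, and the column is
// divided by it. A diagonal that set_diagonal_element rejects (e.g. zero) makes the
// routine fail and return false.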
1156 template <typename T, typename X>
bool square_sparse_matrix<T, X>::fill_eta_matrix(unsigned j, eta_matrix<T, X> ** eta) {
1158     const vector<indexed_value<T>> & col_chunk = get_column_values(adjust_column(j));
1159     bool is_unit = true;
1160     for (const auto & iv : col_chunk) {
1161         unsigned i = adjust_row_inverse(iv.m_index);
1162         if (i > j) {
1163             is_unit = false;
1164             break;
1165         }
1166         if (i == j && iv.m_value != 1) {
1167             is_unit = false;
1168             break;
1169         }
1170     }
1171 
1172     if (is_unit) {
1173         *eta = nullptr;
1174         return true;
1175     }
1176 
1177 #ifdef Z3DEBUG
1178     *eta = new eta_matrix<T, X>(j, dimension());
1179 #else
1180     *eta = new eta_matrix<T, X>(j);
1181 #endif
1182     for (const auto & iv : col_chunk) {
1183         unsigned i = adjust_row_inverse(iv.m_index);
1184         if (i < j) {
1185             continue;
1186         }
1187         if (i > j) {
1188             (*eta)->push_back(i, - iv.m_value);
1189         } else { // i == j
1190             if ( !(*eta)->set_diagonal_element(iv.m_value)) {
1191                 delete *eta;
1192                 *eta = nullptr;
1193                 return false;
1194             }
1195 
1196         }
1197     }
1198 
1199     (*eta)->divide_by_diagonal_element();
1200     return true;
1201 }
1202 #ifdef Z3DEBUG
1203 template <typename T, typename X>
bool square_sparse_matrix<T, X>::is_upper_triangular_and_maximums_are_set_correctly_in_rows(lp_settings & settings) const {
1205     for (unsigned i = 0; i < dimension(); i++) {
1206         vector<indexed_value<T>> const & row_chunk = get_row_values(i);
1207         lp_assert(row_chunk.size());
1208         T const & max = abs(row_chunk[0].m_value);
1209         unsigned ai = adjust_row_inverse(i);
1210         for (auto & iv : row_chunk) {
1211             lp_assert(abs(iv.m_value) <= max);
1212             unsigned aj = adjust_column_inverse(iv.m_index);
1213             if (!(ai <= aj || numeric_traits<T>::is_zero(iv.m_value)))
1214                 return false;
1215             if (aj == ai) {
1216                 if (iv.m_value != 1) {
1217                     return false;
1218                 }
1219             }
1220             if (settings.abs_val_is_smaller_than_drop_tolerance(iv.m_value) && (!is_zero(iv.m_value)))
1221                 return false;
1222         }
1223     }
1224     return true;
1225 }
1226 
1227 template <typename T, typename X>
bool square_sparse_matrix<T, X>::is_upper_triangular_until(unsigned k) const {
1229     for (unsigned j = 0; j < dimension() && j < k; j++) {
1230         unsigned aj = adjust_column(j);
1231         auto & col = get_column_values(aj);
1232         for (auto & iv : col) {
1233             unsigned row = adjust_row_inverse(iv.m_index);
1234             if (row > j)
1235                 return false;
1236         }
1237     }
1238     return true;
1239 }
1240 
1241 template <typename T, typename X>
void square_sparse_matrix<T, X>::check_column_vs_rows(unsigned col) {
1243     auto & mc = get_column_values(col);
1244     for (indexed_value<T> & column_iv : mc) {
1245         indexed_value<T> & row_iv = column_iv_other(column_iv);
1246         if (row_iv.m_index != col) {
1247             lp_assert(false);
1248         }
1249 
1250         if (& row_iv_other(row_iv) != &column_iv) {
1251             lp_assert(false);
1252         }
1253 
1254         if (row_iv.m_value != column_iv.m_value) {
1255             lp_assert(false);
1256         }
1257     }
1258 }
1259 
1260 template <typename T, typename X>
void square_sparse_matrix<T, X>::check_row_vs_columns(unsigned row) {
1262     auto & mc = get_row_values(row);
1263     for (indexed_value<T> & row_iv : mc) {
1264         indexed_value<T> & column_iv = row_iv_other(row_iv);
1265 
1266         if (column_iv.m_index != row) {
1267             lp_assert(false);
1268         }
1269 
1270         if (& row_iv != & column_iv_other(column_iv)) {
1271             lp_assert(false);
1272         }
1273 
1274         if (row_iv.m_value != column_iv.m_value) {
1275             lp_assert(false);
1276         }
1277     }
1278 }
1279 
1280 template <typename T, typename X>
void square_sparse_matrix<T, X>::check_rows_vs_columns() {
1282     for (unsigned i = 0; i < dimension(); i++) {
1283         check_row_vs_columns(i);
1284     }
1285 }
1286 
1287 template <typename T, typename X>
void square_sparse_matrix<T, X>::check_columns_vs_rows() {
1289     for (unsigned i = 0; i < dimension(); i++) {
1290         check_column_vs_rows(i);
1291     }
1292 }
1293 template <typename T, typename X>
void square_sparse_matrix<T, X>::check_matrix() {
1295     check_rows_vs_columns();
1296     check_columns_vs_rows();
1297 }
1298 #endif
1299 }
1300 
1301