// Copyright 2010-2021 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ortools/sat/util.h"

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <functional>
#include <limits>
#include <numeric>
#include <set>
#include <vector>

#include "absl/container/flat_hash_map.h"
#include "absl/numeric/int128.h"
#include "absl/random/bit_gen_ref.h"
#include "absl/random/distributions.h"
#include "absl/types/span.h"
#include "ortools/base/map_util.h"
#include "ortools/base/stl_util.h"

namespace operations_research {
namespace sat {

namespace {
// Computes both the quotient and the remainder of a / b. Compilers typically
// optimize this into a single division instruction.
//
// It is not entirely clear that this extra indirection is needed for the
// optimization to kick in, but it seemed safer given the unusual r[i ^ 1]
// arguments used by the caller below.
void QuotientAndRemainder(int64_t a, int64_t b, int64_t& q, int64_t& r) {
  q = a / b;
  r = a % b;
}
}  // namespace

void RandomizeDecisionHeuristic(absl::BitGenRef random,
                                SatParameters* parameters) {
#if !defined(__PORTABLE_PLATFORM__)
  // Random preferred variable order.
  const google::protobuf::EnumDescriptor* order_d =
      SatParameters::VariableOrder_descriptor();
  parameters->set_preferred_variable_order(
      static_cast<SatParameters::VariableOrder>(
          order_d->value(absl::Uniform(random, 0, order_d->value_count()))
              ->number()));

  // Random polarity initial value.
  const google::protobuf::EnumDescriptor* polarity_d =
      SatParameters::Polarity_descriptor();
  parameters->set_initial_polarity(static_cast<SatParameters::Polarity>(
      polarity_d->value(absl::Uniform(random, 0, polarity_d->value_count()))
          ->number()));
#endif  // __PORTABLE_PLATFORM__
  // Other random parameters.
  parameters->set_use_phase_saving(absl::Bernoulli(random, 0.5));
  parameters->set_random_polarity_ratio(absl::Bernoulli(random, 0.5) ? 0.01
                                                                     : 0.0);
  parameters->set_random_branches_ratio(absl::Bernoulli(random, 0.5) ? 0.01
                                                                     : 0.0);
}

// Using the extended Euclidean algorithm, we find a and b such that
// a * x + b * m = gcd(x, m).
// https://en.wikipedia.org/wiki/Extended_Euclidean_algorithm
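//
// For example, ModularInverse(3, 10) returns 7 since 3 * 7 = 21 = 1 (mod 10),
// while ModularInverse(2, 10) returns 0 because gcd(2, 10) = 2 != 1, so no
// inverse exists.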
int64_t ModularInverse(int64_t x, int64_t m) {
  DCHECK_GE(x, 0);
  DCHECK_LT(x, m);

  int64_t r[2] = {m, x};
  int64_t t[2] = {0, 1};
  int64_t q;

  // We only keep the last two terms of the sequences with the "^1" trick:
  //
  // q = r[i-2] / r[i-1]
  // r[i] = r[i-2] % r[i-1]
  // t[i] = t[i-2] - t[i-1] * q
  //
  // We always have:
  // - gcd(r[i], r[i - 1]) = gcd(r[i - 1], r[i - 2])
  // - x * t[i] = r[i] (mod m)
  int i = 0;
  for (; r[i ^ 1] != 0; i ^= 1) {
    QuotientAndRemainder(r[i], r[i ^ 1], q, r[i]);
    t[i] -= t[i ^ 1] * q;
  }

  // If the gcd is not one, there is no inverse and we return 0.
  if (r[i] != 1) return 0;

  // Correct the result so that it is in [0, m). Note that abs(t[i]) is known
  // to be less than m, so adding m once is enough; we also have thorough
  // unit tests for this.
  if (t[i] < 0) t[i] += m;

  return t[i];
}

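// Returns x mod m in [0, m). This differs from the built-in % operator, which
// truncates toward zero: for example, -7 % 3 == -1 in C++, while
// PositiveMod(-7, 3) == 2.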
int64_t PositiveMod(int64_t x, int64_t m) {
  const int64_t r = x % m;
  return r < 0 ? r + m : r;
}

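// Returns a value X in [0, |mod|) such that coeff * X = rhs (mod mod), given
// that gcd(coeff, mod) == 1. For example, with coeff = 3, mod = 10 and
// rhs = 4: the modular inverse of 3 is 7, so X = 4 * 7 % 10 = 8, and indeed
// 3 * 8 = 24 = 4 (mod 10).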
int64_t ProductWithModularInverse(int64_t coeff, int64_t mod, int64_t rhs) {
  DCHECK_NE(coeff, 0);
  DCHECK_NE(mod, 0);

  mod = std::abs(mod);
  if (rhs == 0 || mod == 1) return 0;
  DCHECK_EQ(std::gcd(std::abs(coeff), mod), 1);

  // Bring both coeff and rhs into [0, mod).
  coeff = PositiveMod(coeff, mod);
  rhs = PositiveMod(rhs, mod);

  // From X * coeff % mod = rhs, we deduce X % mod = rhs * inverse % mod,
  // where inverse is the modular inverse of coeff.
  const int64_t inverse = ModularInverse(coeff, mod);
  CHECK_NE(inverse, 0);

  // We do the multiplication in 128 bits to make sure it cannot overflow.
  const absl::int128 p = absl::int128{inverse} * absl::int128{rhs};
  return static_cast<int64_t>(p % absl::int128{mod});
}

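// For example, for 6 * X + 10 * Y = 8: the gcd is 2, so the equation reduces
// to 3 * X + 5 * Y = 4. Modulo 5, X = 4 * inverse(3) = 4 * 2 = 3, and then
// Y = (4 - 3 * 3) / 5 = -1. Indeed, 6 * 3 + 10 * (-1) = 8.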
bool SolveDiophantineEquationOfSizeTwo(int64_t& a, int64_t& b, int64_t& cte,
                                       int64_t& x0, int64_t& y0) {
  CHECK_NE(a, 0);
  CHECK_NE(b, 0);
  CHECK_NE(a, std::numeric_limits<int64_t>::min());
  CHECK_NE(b, std::numeric_limits<int64_t>::min());

  const int64_t gcd = std::gcd(std::abs(a), std::abs(b));
  if (cte % gcd != 0) return false;
  a /= gcd;
  b /= gcd;
  cte /= gcd;

  // The simple case where (0, 0) is a solution.
  if (cte == 0) {
    x0 = y0 = 0;
    return true;
  }

  // We solve a * X + b * Y = cte, where a and b are now coprime.
  // We take a valid x0 in [0, |b|) by considering the equation modulo b.
  x0 = ProductWithModularInverse(a, b, cte);

  // We choose x0 with the same sign as cte.
  if (cte < 0 && x0 != 0) x0 -= std::abs(b);

  // By plugging X = x0 + b * Z,
  // we have a * (x0 + b * Z) + b * Y = cte,
  // so a * b * Z + b * Y = cte - a * x0,
  // and y0 = (cte - a * x0) / b (an exact division by construction).
  const absl::int128 t = absl::int128{cte} - absl::int128{a} * absl::int128{x0};
  DCHECK_EQ(t % absl::int128{b}, absl::int128{0});

  // Overflow-wise, there are two cases for cte > 0:
  // - a * x0 <= cte, in which case y0 does not overflow (it is <= cte).
  // - a * x0 > cte, in which case y0 is in (-a, 0].
  const absl::int128 r = t / absl::int128{b};
  DCHECK_LE(r, absl::int128{std::numeric_limits<int64_t>::max()});
  DCHECK_GE(r, absl::int128{std::numeric_limits<int64_t>::min()});

  y0 = static_cast<int64_t>(r);
  return true;
}

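// Returns the size of the prefix of `literals` that is left untouched, or -1
// if there is nothing to do (no literal at all, or no unprocessed literal).
// The suffix after that prefix is reordered (stably) so that the already
// processed literals come first and an unprocessed one ends up last.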
int MoveOneUnprocessedLiteralLast(const std::set<LiteralIndex>& processed,
                                  int relevant_prefix_size,
                                  std::vector<Literal>* literals) {
  if (literals->empty()) return -1;
  if (!gtl::ContainsKey(processed, literals->back().Index())) {
    return std::min<int>(relevant_prefix_size, literals->size());
  }

  // To get an O(n log n) total size of suffixes, we first process the last n/2
  // literals, then move all of them to the front and process the remaining n/2
  // literals, applying the same scheme recursively. The total suffix size S(n)
  // thus satisfies S(n) = 2 * S(n/2) + n, which gives the claimed complexity.
  // The code below simulates one step of this algorithm and is made "robust"
  // to literals being removed between calls (as long as the order of the
  // remaining literals is preserved).
  int num_processed = 0;
  int num_not_processed = 0;
  int target_prefix_size = literals->size() - 1;
  for (int i = literals->size() - 1; i >= 0; i--) {
    if (gtl::ContainsKey(processed, (*literals)[i].Index())) {
      ++num_processed;
    } else {
      ++num_not_processed;
      target_prefix_size = i;
    }
    if (num_not_processed >= num_processed) break;
  }
  if (num_not_processed == 0) return -1;
  target_prefix_size = std::min(target_prefix_size, relevant_prefix_size);

  // Once a prefix size has been decided, it is always better to enqueue the
  // already processed literals first.
  std::stable_partition(literals->begin() + target_prefix_size, literals->end(),
                        [&processed](Literal l) {
                          return gtl::ContainsKey(processed, l.Index());
                        });
  return target_prefix_size;
}

void IncrementalAverage::Reset(double reset_value) {
  num_records_ = 0;
  average_ = reset_value;
}

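// Streaming mean update: with n records, the new average is
// old_average + (new_record - old_average) / n, which is algebraically the
// same as ((n - 1) * old_average + new_record) / n but avoids keeping a
// potentially large running sum.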
void IncrementalAverage::AddData(double new_record) {
  num_records_++;
  average_ += (new_record - average_) / num_records_;
}

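// Exponential moving average: the update below is equivalent to
// average = decaying_factor_ * average + (1 - decaying_factor_) * new_record,
// with the very first record initializing the average directly.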
void ExponentialMovingAverage::AddData(double new_record) {
  num_records_++;
  average_ = (num_records_ == 1)
                 ? new_record
                 : (new_record + decaying_factor_ * (average_ - new_record));
}

void Percentile::AddRecord(double record) {
  records_.push_front(record);
  if (records_.size() > record_limit_) {
    records_.pop_back();
  }
}

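// Linear interpolation between the two sorted records surrounding the
// requested rank. For example, with records {1, 2, 3, 4} and percent = 50,
// the rank is 4 * 0.5 - 0.5 = 1.5, so we return
// sorted_records[1] + 0.5 * (sorted_records[2] - sorted_records[1]) = 2.5.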
double Percentile::GetPercentile(double percent) {
  CHECK_GT(records_.size(), 0);
  CHECK_LE(percent, 100.0);
  CHECK_GE(percent, 0.0);
  std::vector<double> sorted_records(records_.begin(), records_.end());
  std::sort(sorted_records.begin(), sorted_records.end());
  const int num_records = sorted_records.size();

  const double percentile_rank =
      static_cast<double>(num_records) * percent / 100.0 - 0.5;
  if (percentile_rank <= 0) {
    return sorted_records.front();
  } else if (percentile_rank >= num_records - 1) {
    return sorted_records.back();
  }
  // Interpolate.
  DCHECK_GE(num_records, 2);
  DCHECK_LT(percentile_rank, num_records - 1);
  const int lower_rank = static_cast<int>(std::floor(percentile_rank));
  DCHECK_LT(lower_rank, num_records - 1);
  return sorted_records[lower_rank] +
         (percentile_rank - lower_rank) *
             (sorted_records[lower_rank + 1] - sorted_records[lower_rank]);
}

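// For example, with domain_sizes = {2, 2} and tuples {(0, 0), (0, 1), (1, 0)},
// the tuples (0, 0) and (1, 0) only differ on the first variable and cover its
// whole domain, so they are merged into (any_value, 0); the result is
// {(any_value, 0), (0, 1)}.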
void CompressTuples(absl::Span<const int64_t> domain_sizes, int64_t any_value,
                    std::vector<std::vector<int64_t>>* tuples) {
  if (tuples->empty()) return;

  // Remove duplicates if any.
  gtl::STLSortAndRemoveDuplicates(tuples);

  const int num_vars = (*tuples)[0].size();

  std::vector<int> to_remove;
  std::vector<int64_t> tuple_minus_var_i(num_vars - 1);
  for (int i = 0; i < num_vars; ++i) {
    const int domain_size = domain_sizes[i];
    if (domain_size == 1) continue;
    absl::flat_hash_map<const std::vector<int64_t>, std::vector<int>>
        masked_tuples_to_indices;
    for (int t = 0; t < tuples->size(); ++t) {
      int out = 0;
      for (int j = 0; j < num_vars; ++j) {
        if (i == j) continue;
        tuple_minus_var_i[out++] = (*tuples)[t][j];
      }
      masked_tuples_to_indices[tuple_minus_var_i].push_back(t);
    }
    to_remove.clear();
    for (const auto& it : masked_tuples_to_indices) {
      if (it.second.size() != domain_size) continue;
      (*tuples)[it.second.front()][i] = any_value;
      to_remove.insert(to_remove.end(), it.second.begin() + 1, it.second.end());
    }
    std::sort(to_remove.begin(), to_remove.end(), std::greater<int>());
    for (const int t : to_remove) {
      (*tuples)[t] = tuples->back();
      tuples->pop_back();
    }
  }
}

}  // namespace sat
}  // namespace operations_research