1// Copyright 2010-2021 Google LLC
2// Licensed under the Apache License, Version 2.0 (the "License");
3// you may not use this file except in compliance with the License.
4// You may obtain a copy of the License at
5//
6//     http://www.apache.org/licenses/LICENSE-2.0
7//
8// Unless required by applicable law or agreed to in writing, software
9// distributed under the License is distributed on an "AS IS" BASIS,
10// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11// See the License for the specific language governing permissions and
12// limitations under the License.
13
14syntax = "proto2";
15
16package operations_research.sat;
17
18option java_package = "com.google.ortools.sat";
19option java_multiple_files = true;
20
21option csharp_namespace = "Google.OrTools.Sat";
22
23
24// Contains the definitions for all the sat algorithm parameters and their
25// default values.
26//
27// NEXT TAG: 201
28message SatParameters {
  // In some contexts, like in a portfolio of search, it makes sense to name a
  // given parameter set for logging purposes.
31  optional string name = 171 [default = ""];
32
33  // ==========================================================================
34  // Branching and polarity
35  // ==========================================================================
36
  // Variables without activity (i.e. at the beginning of the search) will be
  // tried in this preferred order.
  enum VariableOrder {
    IN_ORDER = 0;          // As specified by the problem.
    IN_REVERSE_ORDER = 1;  // Reverse of the problem order.
    IN_RANDOM_ORDER = 2;   // A random order.
  }
44  optional VariableOrder preferred_variable_order = 1 [default = IN_ORDER];
45
46  // Specifies the initial polarity (true/false) when the solver branches on a
47  // variable. This can be modified later by the user, or the phase saving
48  // heuristic.
49  //
50  // Note(user): POLARITY_FALSE is usually a good choice because of the
51  // "natural" way to express a linear boolean problem.
52  enum Polarity {
53    POLARITY_TRUE = 0;
54    POLARITY_FALSE = 1;
55    POLARITY_RANDOM = 2;
56
57    // Choose the sign that tends to satisfy the most constraints. This is
58    // computed using a weighted sum: if a literal l appears in a constraint of
59    // the form: ... + coeff * l +... <= rhs with positive coefficients and
60    // rhs, then -sign(l) * coeff / rhs is added to the weight of l.variable().
61    POLARITY_WEIGHTED_SIGN = 3;
62
63    // The opposite choice of POLARITY_WEIGHTED_SIGN.
64    POLARITY_REVERSE_WEIGHTED_SIGN = 4;
65  }
66  optional Polarity initial_polarity = 2 [default = POLARITY_FALSE];
67
68  // If this is true, then the polarity of a variable will be the last value it
69  // was assigned to, or its default polarity if it was never assigned since the
70  // call to ResetDecisionHeuristic().
71  //
72  // Actually, we use a newer version where we follow the last value in the
73  // longest non-conflicting partial assignment in the current phase.
74  //
75  // This is called 'literal phase saving'. For details see 'A Lightweight
76  // Component Caching Scheme for Satisfiability Solvers' K. Pipatsrisawat and
77  // A.Darwiche, In 10th International Conference on Theory and Applications of
78  // Satisfiability Testing, 2007.
79  optional bool use_phase_saving = 44 [default = true];
80
  // If non-zero, then we change the polarity heuristic after that many
  // conflicts in an arithmetically increasing fashion. So x the first time,
  // 2 * x the second time, etc...
84  optional int32 polarity_rephase_increment = 168 [default = 1000];
85
  // The proportion of polarities chosen at random. Note that this takes
  // precedence over the phase saving heuristic. This is different from
88  // initial_polarity:POLARITY_RANDOM because it will select a new random
89  // polarity each time the variable is branched upon instead of selecting one
90  // initially and then always taking this choice.
91  optional double random_polarity_ratio = 45 [default = 0.0];
92
93  // A number between 0 and 1 that indicates the proportion of branching
94  // variables that are selected randomly instead of choosing the first variable
95  // from the given variable_ordering strategy.
96  optional double random_branches_ratio = 32 [default = 0.0];
97
98  // Whether we use the ERWA (Exponential Recency Weighted Average) heuristic as
99  // described in "Learning Rate Based Branching Heuristic for SAT solvers",
100  // J.H.Liang, V. Ganesh, P. Poupart, K.Czarnecki, SAT 2016.
101  optional bool use_erwa_heuristic = 75 [default = false];
102
  // The initial value of the variables activity. A non-zero value only makes
  // sense when use_erwa_heuristic is true. Experiments with a value of 1e-2
  // together with the ERWA heuristic showed slightly better results than simply
  // using zero. The idea is that when the "learning rate" of a variable becomes
107  // lower than this value, then we prefer to branch on never explored before
108  // variables. This is not in the ERWA paper.
109  optional double initial_variables_activity = 76 [default = 0.0];
110
  // When this is true, then the variables that appear in any of the reasons of
  // the variables in a conflict have their activity bumped. This is in addition
  // to the variables in the conflict, and the ones that were used during
  // conflict resolution.
115  optional bool also_bump_variables_in_conflict_reasons = 77 [default = false];
116
117  // ==========================================================================
118  // Conflict analysis
119  // ==========================================================================
120
  // Do we try to minimize conflicts (greedily) when creating them.
  //
  // NOTE(review): these enum values are not prefixed with the enum type name
  // (e.g. NONE instead of CONFLICT_MINIMIZATION_NONE). Renaming them now would
  // break the generated code in all client languages, so they are kept as-is.
  enum ConflictMinimizationAlgorithm {
    // Do not minimize the learned conflicts.
    NONE = 0;
    SIMPLE = 1;
    // This is the default (see the minimization_algorithm field below).
    RECURSIVE = 2;
    EXPERIMENTAL = 3;
  }
128  optional ConflictMinimizationAlgorithm minimization_algorithm = 4
129      [default = RECURSIVE];
130
  // Whether to exploit the binary clauses to minimize learned clauses further.
  // This will have an effect only if treat_binary_clauses_separately is true.
  //
  // NOTE(review): the type name misspells "Minimization"
  // (BinaryMinizationAlgorithm). Renaming it would break the generated code in
  // all client languages, so the typo is kept for compatibility.
  enum BinaryMinizationAlgorithm {
    NO_BINARY_MINIMIZATION = 0;
    BINARY_MINIMIZATION_FIRST = 1;
    BINARY_MINIMIZATION_FIRST_WITH_TRANSITIVE_REDUCTION = 4;
    BINARY_MINIMIZATION_WITH_REACHABILITY = 2;
    EXPERIMENTAL_BINARY_MINIMIZATION = 3;
  }
140  optional BinaryMinizationAlgorithm binary_minimization_algorithm = 34
141      [default = BINARY_MINIMIZATION_FIRST];
142
143  // At a really low cost, during the 1-UIP conflict computation, it is easy to
144  // detect if some of the involved reasons are subsumed by the current
145  // conflict. When this is true, such clauses are detached and later removed
146  // from the problem.
147  optional bool subsumption_during_conflict_analysis = 56 [default = true];
148
149  // ==========================================================================
150  // Clause database management
151  // ==========================================================================
152
153  // Trigger a cleanup when this number of "deletable" clauses is learned.
154  optional int32 clause_cleanup_period = 11 [default = 10000];
155
156  // During a cleanup, we will always keep that number of "deletable" clauses.
157  // Note that this doesn't include the "protected" clauses.
158  optional int32 clause_cleanup_target = 13 [default = 0];
159
160  // During a cleanup, if clause_cleanup_target is 0, we will delete the
161  // clause_cleanup_ratio of "deletable" clauses instead of aiming for a fixed
162  // target of clauses to keep.
163  optional double clause_cleanup_ratio = 190 [default = 0.5];
164
165  // Each time a clause activity is bumped, the clause has a chance to be
166  // protected during the next cleanup phase. Note that clauses used as a reason
167  // are always protected.
168  enum ClauseProtection {
169    PROTECTION_NONE = 0;    // No protection.
170    PROTECTION_ALWAYS = 1;  // Protect all clauses whose activity is bumped.
171    PROTECTION_LBD = 2;     // Only protect clause with a better LBD.
172  }
173  optional ClauseProtection clause_cleanup_protection = 58
174      [default = PROTECTION_NONE];
175
  // All the clauses with an LBD (literal blocks distance) lower or equal to
  // this parameter will always be kept.
178  optional int32 clause_cleanup_lbd_bound = 59 [default = 5];
179
180  // The clauses that will be kept during a cleanup are the ones that come
181  // first under this order. We always keep or exclude ties together.
182  enum ClauseOrdering {
183    // Order clause by decreasing activity, then by increasing LBD.
184    CLAUSE_ACTIVITY = 0;
185    // Order clause by increasing LBD, then by decreasing activity.
186    CLAUSE_LBD = 1;
187  }
188  optional ClauseOrdering clause_cleanup_ordering = 60
189      [default = CLAUSE_ACTIVITY];
190
191  // Same as for the clauses, but for the learned pseudo-Boolean constraints.
192  optional int32 pb_cleanup_increment = 46 [default = 200];
193  optional double pb_cleanup_ratio = 47 [default = 0.5];
194
  // Parameters for a heuristic similar to the one described in "An effective
  // learnt clause minimization approach for CDCL Sat Solvers",
197  // https://www.ijcai.org/proceedings/2017/0098.pdf
198  //
199  // For now, we have a somewhat simpler implementation where every x restart we
200  // spend y decisions on clause minimization. The minimization technique is the
201  // same as the one used to minimize core in max-sat. We also minimize problem
202  // clauses and not just the learned clause that we keep forever like in the
203  // paper.
204  //
205  // Changing these parameters or the kind of clause we minimize seems to have
206  // a big impact on the overall perf on our benchmarks. So this technique seems
207  // definitely useful, but it is hard to tune properly.
208  optional int32 minimize_with_propagation_restart_period = 96 [default = 10];
209  optional int32 minimize_with_propagation_num_decisions = 97 [default = 1000];
210
211  // ==========================================================================
212  // Variable and clause activities
213  // ==========================================================================
214
215  // Each time a conflict is found, the activities of some variables are
216  // increased by one. Then, the activity of all variables are multiplied by
217  // variable_activity_decay.
218  //
219  // To implement this efficiently, the activity of all the variables is not
220  // decayed at each conflict. Instead, the activity increment is multiplied by
221  // 1 / decay. When an activity reach max_variable_activity_value, all the
222  // activity are multiplied by 1 / max_variable_activity_value.
223  optional double variable_activity_decay = 15 [default = 0.8];
224  optional double max_variable_activity_value = 16 [default = 1e100];
225
226  // The activity starts at 0.8 and increment by 0.01 every 5000 conflicts until
227  // 0.95. This "hack" seems to work well and comes from:
228  //
229  // Glucose 2.3 in the SAT 2013 Competition - SAT Competition 2013
230  // http://edacc4.informatik.uni-ulm.de/SC13/solver-description-download/136
231  optional double glucose_max_decay = 22 [default = 0.95];
232  optional double glucose_decay_increment = 23 [default = 0.01];
233  optional int32 glucose_decay_increment_period = 24 [default = 5000];
234
235  // Clause activity parameters (same effect as the one on the variables).
236  optional double clause_activity_decay = 17 [default = 0.999];
237  optional double max_clause_activity_value = 18 [default = 1e20];
238
239  // ==========================================================================
240  // Restart
241  // ==========================================================================
242
243  // Restart algorithms.
244  //
245  // A reference for the more advanced ones is:
246  // Gilles Audemard, Laurent Simon, "Refining Restarts Strategies for SAT
247  // and UNSAT", Principles and Practice of Constraint Programming Lecture
248  // Notes in Computer Science 2012, pp 118-126
249  enum RestartAlgorithm {
250    NO_RESTART = 0;
251
252    // Just follow a Luby sequence times restart_period.
253    LUBY_RESTART = 1;
254
255    // Moving average restart based on the decision level of conflicts.
256    DL_MOVING_AVERAGE_RESTART = 2;
257
258    // Moving average restart based on the LBD of conflicts.
259    LBD_MOVING_AVERAGE_RESTART = 3;
260
261    // Fixed period restart every restart period.
262    FIXED_RESTART = 4;
263  }
264
  // The restart strategies will change each time the strategy_counter is
  // increased. The current strategy will simply be the one at index
  // strategy_counter modulo the number of strategies. Note that if this list
  // includes a NO_RESTART, nothing will change when it is reached because the
  // strategy_counter will only increment after a restart.
  //
  // The idea of switching search strategies tailored for SAT/UNSAT comes from
  // Chanseok Oh with his COMiniSatPS solver, see http://cs.nyu.edu/~chanseok/.
  // But more generally, it seems REALLY beneficial to try different strategies.
274  repeated RestartAlgorithm restart_algorithms = 61;
275  optional string default_restart_algorithms = 70
276      [default =
277           "LUBY_RESTART,LBD_MOVING_AVERAGE_RESTART,DL_MOVING_AVERAGE_RESTART"];
278
279  // Restart period for the FIXED_RESTART strategy. This is also the multiplier
280  // used by the LUBY_RESTART strategy.
281  optional int32 restart_period = 30 [default = 50];
282
283  // Size of the window for the moving average restarts.
284  optional int32 restart_running_window_size = 62 [default = 50];
285
  // In the moving average restart algorithms, a restart is triggered if the
  // window average times this ratio is greater than the global average.
288  optional double restart_dl_average_ratio = 63 [default = 1.0];
289  optional double restart_lbd_average_ratio = 71 [default = 1.0];
290
291  // Block a moving restart algorithm if the trail size of the current conflict
292  // is greater than the multiplier times the moving average of the trail size
293  // at the previous conflicts.
294  optional bool use_blocking_restart = 64 [default = false];
295  optional int32 blocking_restart_window_size = 65 [default = 5000];
296  optional double blocking_restart_multiplier = 66 [default = 1.4];
297
  // After each restart, if the number of conflicts since the last strategy
  // change is greater than this, then we increment a "strategy_counter" that
  // can be used to change the search strategy used by the following restarts.
301  optional int32 num_conflicts_before_strategy_changes = 68 [default = 0];
302
303  // The parameter num_conflicts_before_strategy_changes is increased by that
304  // much after each strategy change.
305  optional double strategy_change_increase_ratio = 69 [default = 0.0];
306
307  // ==========================================================================
308  // Limits
309  // ==========================================================================
310
311  // Maximum time allowed in seconds to solve a problem.
  // The counter will start at the beginning of the Solve() call.
313  optional double max_time_in_seconds = 36 [default = inf];
314
315  // Maximum time allowed in deterministic time to solve a problem.
316  // The deterministic time should be correlated with the real time used by the
317  // solver, the time unit being as close as possible to a second.
318  optional double max_deterministic_time = 67 [default = inf];
319
320  // Maximum number of conflicts allowed to solve a problem.
321  //
322  // TODO(user,user): Maybe change the way the conflict limit is enforced?
323  // currently it is enforced on each independent internal SAT solve, rather
324  // than on the overall number of conflicts across all solves. So in the
325  // context of an optimization problem, this is not really usable directly by a
326  // client.
327  optional int64 max_number_of_conflicts = 37
328      [default = 0x7FFFFFFFFFFFFFFF];  // kint64max
329
330  // Maximum memory allowed for the whole thread containing the solver. The
331  // solver will abort as soon as it detects that this limit is crossed. As a
  // result, this limit is approximate, but usually the solver will not go too
333  // much over.
334  //
335  // TODO(user): This is only used by the pure SAT solver, generalize to CP-SAT.
336  optional int64 max_memory_in_mb = 40 [default = 10000];
337
338  // Stop the search when the gap between the best feasible objective (O) and
339  // our best objective bound (B) is smaller than a limit.
340  // The exact definition is:
341  // - Absolute: abs(O - B)
342  // - Relative: abs(O - B) / max(1, abs(O)).
343  //
344  // Important: The relative gap depends on the objective offset! If you
345  // artificially shift the objective, you will get widely different value of
346  // the relative gap.
347  //
348  // Note that if the gap is reached, the search status will be OPTIMAL. But
349  // one can check the best objective bound to see the actual gap.
350  //
  // If the objective is integer, then any absolute gap < 1 will lead to a true
  // optimal. If the objective is floating point, a gap of zero makes little
  // sense, which is why we use a non-zero default value. At the end of the
354  // search, we will display a warning if OPTIMAL is reported yet the gap is
355  // greater than this absolute gap.
356  optional double absolute_gap_limit = 159 [default = 1e-4];
357  optional double relative_gap_limit = 160 [default = 0.0];
358
359  // ==========================================================================
360  // Other parameters
361  // ==========================================================================
362
363  // If true, the binary clauses are treated separately from the others. This
364  // should be faster and uses less memory. However it changes the propagation
365  // order.
366  optional bool treat_binary_clauses_separately = 33 [default = true];
367
368  // At the beginning of each solve, the random number generator used in some
369  // part of the solver is reinitialized to this seed. If you change the random
370  // seed, the solver may make different choices during the solving process.
371  //
372  // For some problems, the running time may vary a lot depending on small
373  // change in the solving algorithm. Running the solver with different seeds
374  // enables to have more robust benchmarks when evaluating new features.
375  optional int32 random_seed = 31 [default = 1];
376
377  // This is mainly here to test the solver variability. Note that in tests, if
378  // not explicitly set to false, all 3 options will be set to true so that
  // clients do not rely on the solver returning a specific solution if there
  // are many equivalent optimal solutions.
381  optional bool permute_variable_randomly = 178 [default = false];
382  optional bool permute_presolve_constraint_order = 179 [default = false];
383  optional bool use_absl_random = 180 [default = false];
384
385  // Whether the solver should log the search progress. By default, it logs to
386  // LOG(INFO). This can be overwritten by the log_destination parameter.
387  optional bool log_search_progress = 41 [default = false];
388
389  // Whether the solver should display per sub-solver search statistics.
  // This is only useful if log_search_progress is set to true, and if the
391  // number of search workers is > 1.
392  optional bool log_subsolver_statistics = 189 [default = true];
393
394  // Add a prefix to all logs.
395  optional string log_prefix = 185 [default = ""];
396
397  // Log to stdout.
398  optional bool log_to_stdout = 186 [default = true];
399
400  // Log to response proto.
401  optional bool log_to_response = 187 [default = false];
402
403  // Whether to use pseudo-Boolean resolution to analyze a conflict. Note that
  // this option only makes sense if your problem is modeled using
  // pseudo-Boolean constraints. If you only have clauses, this shouldn't change
  // anything (except slowing the solver down).
407  optional bool use_pb_resolution = 43 [default = false];
408
409  // A different algorithm during PB resolution. It minimizes the number of
410  // calls to ReduceCoefficients() which can be time consuming. However, the
411  // search space will be different and if the coefficients are large, this may
412  // lead to integer overflows that could otherwise be prevented.
413  optional bool minimize_reduction_during_pb_resolution = 48 [default = false];
414
415  // Whether or not the assumption levels are taken into account during the LBD
416  // computation. According to the reference below, not counting them improves
  // the solver in some situations. Note that this only impacts solves under
418  // assumptions.
419  //
420  // Gilles Audemard, Jean-Marie Lagniez, Laurent Simon, "Improving Glucose for
421  // Incremental SAT Solving with Assumptions: Application to MUS Extraction"
422  // Theory and Applications of Satisfiability Testing - SAT 2013, Lecture Notes
423  // in Computer Science Volume 7962, 2013, pp 309-317.
424  optional bool count_assumption_levels_in_lbd = 49 [default = true];
425
426  // ==========================================================================
427  // Presolve
428  // ==========================================================================
429
430  // During presolve, only try to perform the bounded variable elimination (BVE)
431  // of a variable x if the number of occurrences of x times the number of
432  // occurrences of not(x) is not greater than this parameter.
433  optional int32 presolve_bve_threshold = 54 [default = 500];
434
435  // During presolve, we apply BVE only if this weight times the number of
436  // clauses plus the number of clause literals is not increased.
437  optional int32 presolve_bve_clause_weight = 55 [default = 3];
438
439  // The maximum "deterministic" time limit to spend in probing. A value of
440  // zero will disable the probing.
441  optional double presolve_probing_deterministic_time_limit = 57
442      [default = 30.0];
443
  // Whether we use a heuristic to detect some basic cases of blocked clauses
  // in the SAT presolve.
446  optional bool presolve_blocked_clause = 88 [default = true];
447
448  // Whether or not we use Bounded Variable Addition (BVA) in the presolve.
449  optional bool presolve_use_bva = 72 [default = true];
450
451  // Apply Bounded Variable Addition (BVA) if the number of clauses is reduced
  // by strictly more than this threshold. The algorithm described in the paper
453  // uses 0, but quick experiments showed that 1 is a good value. It may not be
454  // worth it to add a new variable just to remove one clause.
455  optional int32 presolve_bva_threshold = 73 [default = 1];
456
457  // In case of large reduction in a presolve iteration, we perform multiple
458  // presolve iterations. This parameter controls the maximum number of such
459  // presolve iterations.
460  optional int32 max_presolve_iterations = 138 [default = 3];
461
462  // Whether we presolve the cp_model before solving it.
463  optional bool cp_model_presolve = 86 [default = true];
464
465  // How much effort do we spend on probing. 0 disables it completely.
466  optional int32 cp_model_probing_level = 110 [default = 2];
467
468  // Whether we also use the sat presolve when cp_model_presolve is true.
469  optional bool cp_model_use_sat_presolve = 93 [default = true];
470  optional bool use_sat_inprocessing = 163 [default = false];
471
472  // If true, expand all_different constraints that are not permutations.
473  // Permutations (#Variables = #Values) are always expanded.
474  optional bool expand_alldiff_constraints = 170 [default = false];
475
  // If true, it disables all constraint expansion.
477  // This should only be used to test the presolve of expanded constraints.
478  optional bool disable_constraint_expansion = 181 [default = false];
479
480  // During presolve, we use a maximum clique heuristic to merge together
481  // no-overlap constraints or at most one constraints. This code can be slow,
482  // so we have a limit in place on the number of explored nodes in the
483  // underlying graph. The internal limit is an int64, but we use double here to
484  // simplify manual input.
485  optional double merge_no_overlap_work_limit = 145 [default = 1e12];
486  optional double merge_at_most_one_work_limit = 146 [default = 1e8];
487
  // How much substitution (also called free variable aggregation in MIP
  // literature) should we perform at presolve. This currently only concerns
  // variables appearing only in linear constraints. For now the value 0 turns
  // it off and any positive value performs substitution.
492  optional int32 presolve_substitution_level = 147 [default = 1];
493
494  // If true, we will extract from linear constraints, enforcement literals of
495  // the form "integer variable at bound => simplified constraint". This should
496  // always be beneficial except that we don't always handle them as efficiently
497  // as we could for now. This causes problem on manna81.mps (LP relaxation not
498  // as tight it seems) and on neos-3354841-apure.mps.gz (too many literals
499  // created this way).
500  optional bool presolve_extract_integer_enforcement = 174 [default = false];
501
502  // ==========================================================================
503  // Debugging parameters
504  // ==========================================================================
505
506  // We have two different postsolve code. The default one should be better and
507  // it allows for a more powerful presolve, but it can be useful to postsolve
508  // using the full solver instead.
509  optional bool debug_postsolve_with_full_solver = 162 [default = false];
510
511  // If positive, try to stop just after that many presolve rules have been
512  // applied. This is mainly useful for debugging presolve.
513  optional int32 debug_max_num_presolve_operations = 151 [default = 0];
514
515  // Crash if we do not manage to complete the hint into a full solution.
516  optional bool debug_crash_on_bad_hint = 195 [default = false];
517
518  // ==========================================================================
519  // Max-sat parameters
520  // ==========================================================================
521
522  // For an optimization problem, whether we follow some hints in order to find
523  // a better first solution. For a variable with hint, the solver will always
524  // try to follow the hint. It will revert to the variable_branching default
525  // otherwise.
526  optional bool use_optimization_hints = 35 [default = true];
527
528  // Whether we use a simple heuristic to try to minimize an UNSAT core.
529  optional bool minimize_core = 50 [default = true];
530
531  // Whether we try to find more independent cores for a given set of
532  // assumptions in the core based max-SAT algorithms.
533  optional bool find_multiple_cores = 84 [default = true];
534
  // If true, when the max-sat algo finds a core, we compute the minimal number
536  // of literals in the core that needs to be true to have a feasible solution.
537  optional bool cover_optimization = 89 [default = true];
538
  // In what order do we add the assumptions in a core-based max-sat algorithm.
  enum MaxSatAssumptionOrder {
    // Keep the solver's default order.
    DEFAULT_ASSUMPTION_ORDER = 0;
    // Order the assumptions by depth -- presumably the depth tracked by the
    // core-based algorithm; TODO confirm against the implementation.
    ORDER_ASSUMPTION_BY_DEPTH = 1;
    // Order the assumptions by weight -- presumably the objective weight of
    // the corresponding literal; TODO confirm against the implementation.
    ORDER_ASSUMPTION_BY_WEIGHT = 2;
  }
545  optional MaxSatAssumptionOrder max_sat_assumption_order = 51
546      [default = DEFAULT_ASSUMPTION_ORDER];
547
548  // If true, adds the assumption in the reverse order of the one defined by
549  // max_sat_assumption_order.
550  optional bool max_sat_reverse_assumption_order = 52 [default = false];
551
552  // What stratification algorithm we use in the presence of weight.
553  enum MaxSatStratificationAlgorithm {
554    // No stratification of the problem.
555    STRATIFICATION_NONE = 0;
556
557    // Start with literals with the highest weight, and when SAT, add the
558    // literals with the next highest weight and so on.
559    STRATIFICATION_DESCENT = 1;
560
561    // Start with all literals. Each time a core is found with a given minimum
562    // weight, do not consider literals with a lower weight for the next core
563    // computation. If the subproblem is SAT, do like in STRATIFICATION_DESCENT
564    // and just add the literals with the next highest weight.
565    STRATIFICATION_ASCENT = 2;
566  }
567  optional MaxSatStratificationAlgorithm max_sat_stratification = 53
568      [default = STRATIFICATION_DESCENT];
569
570  // ==========================================================================
571  // Constraint programming parameters
572  // ==========================================================================
573
574  // When this is true, then a disjunctive constraint will try to use the
575  // precedence relations between time intervals to propagate their bounds
576  // further. For instance if task A and B are both before C and task A and B
577  // are in disjunction, then we can deduce that task C must start after
578  // duration(A) + duration(B) instead of simply max(duration(A), duration(B)),
  // provided that the start time for all tasks was currently zero.
580  //
  // This always results in better propagation, but it is usually slow, so
582  // depending on the problem, turning this off may lead to a faster solution.
583  optional bool use_precedences_in_disjunctive_constraint = 74 [default = true];
584
585  // When this is true, the cumulative constraint is reinforced with overload
586  // checking, i.e., an additional level of reasoning based on energy. This
587  // additional level supplements the default level of reasoning as well as
588  // timetable edge finding.
589  //
  // This always results in better propagation, but it is usually slow, so
591  // depending on the problem, turning this off may lead to a faster solution.
592  optional bool use_overload_checker_in_cumulative_constraint = 78
593      [default = false];
594
595  // When this is true, the cumulative constraint is reinforced with timetable
596  // edge finding, i.e., an additional level of reasoning based on the
597  // conjunction of energy and mandatory parts. This additional level
598  // supplements the default level of reasoning as well as overload_checker.
599  //
  // This always results in better propagation, but it is usually slow, so
601  // depending on the problem, turning this off may lead to a faster solution.
602  optional bool use_timetable_edge_finding_in_cumulative_constraint = 79
603      [default = false];
604
605  // When this is true, the cumulative constraint is reinforced with propagators
606  // from the disjunctive constraint to improve the inference on a set of tasks
607  // that are disjunctive at the root of the problem. This additional level
608  // supplements the default level of reasoning.
609  //
610  // Propagators of the cumulative constraint will not be used at all if all the
611  // tasks are disjunctive at root node.
612  //
  // This always results in better propagation, but it is usually slow, so
614  // depending on the problem, turning this off may lead to a faster solution.
615  optional bool use_disjunctive_constraint_in_cumulative_constraint = 80
616      [default = true];
617
618  // When this is true, the no_overlap_2d constraint is reinforced with
619  // propagators from the cumulative constraints. It consists of ignoring the
620  // position of rectangles in one position and projecting the no_overlap_2d on
621  // the other dimension to create a cumulative constraint. This is done on both
622  // axis. This additional level supplements the default level of reasoning.
623  optional bool use_cumulative_in_no_overlap_2d = 200 [default = false];
624
  // A non-negative level indicating the type of constraints we consider in the
  // LP relaxation. At level zero, no LP relaxation is used. At level 1, only
  // the linear constraints and full encoding are added. At level 2, we also
  // add all the Boolean constraints.
  optional int32 linearization_level = 90 [default = 1];

  // A non-negative level indicating how much we should try to fully encode
  // integer variables as Boolean.
  optional int32 boolean_encoding_level = 107 [default = 1];

  // When loading a*x + b*y ==/!= c where x and y are both fully encoded, the
  // solver may decide to replace the linear equation by a set of clauses.
  // This is triggered if the sizes of the domains of x and y are below the
  // threshold.
  optional int32 max_domain_size_when_encoding_eq_neq_constraints = 191
      [default = 16];

  // The limit on the number of cuts in our cut pool. When this is reached we do
  // not generate cuts anymore.
  //
  // TODO(user): We should probably remove this parameter, and just always
  // generate cuts but only keep the best n or something.
  optional int32 max_num_cuts = 91 [default = 10000];

  // Controls the global cut effort. Zero will turn off all cuts. For now we
  // just have one level. Note also that most cuts are only used at
  // linearization level >= 2.
  optional int32 cut_level = 196 [default = 1];

  // For the cuts that can be generated at any level, this controls whether we
  // only try to generate them at the root node.
  optional bool only_add_cuts_at_level_zero = 92 [default = false];

  // When the LP objective is fractional, do we add the cut that forces the
  // linear objective expression to be greater or equal to this fractional value
  // rounded up? We can always do that since our objective is integer, and
  // combined with the MIR heuristic to reduce the coefficients of such a cut,
  // it can help.
  optional bool add_objective_cut = 197 [default = false];
664
  // Whether we generate and add Chvatal-Gomory cuts to the LP at root node.
  // Note that for now, this is not heavily tuned.
  optional bool add_cg_cuts = 117 [default = true];

  // Whether we generate MIR cuts at root node.
  // Note that for now, this is not heavily tuned.
  optional bool add_mir_cuts = 120 [default = true];

  // Whether we generate Zero-Half cuts at root node.
  // Note that for now, this is not heavily tuned.
  optional bool add_zero_half_cuts = 169 [default = true];

  // Whether we generate clique cuts from the binary implication graph. Note
  // that as the search goes on, this graph will contain new binary clauses
  // learned by the SAT engine.
  optional bool add_clique_cuts = 172 [default = true];

  // The cut generator for all_diffs can add too many cuts for large all_diff
  // constraints. This parameter restricts which of the large all_diff
  // constraints get a cut generator.
  optional int32 max_all_diff_cut_size = 148 [default = 7];

  // For the lin max constraints, generates the cuts described in "Strong
  // mixed-integer programming formulations for trained neural networks" by Ross
  // Anderson et al. (https://arxiv.org/pdf/1811.01988.pdf).
  optional bool add_lin_max_cuts = 152 [default = true];

  // In the integer rounding procedure used for MIR and Gomory cuts, the maximum
  // "scaling" we use (must be positive). The lower this is, the lower the
  // integer coefficients of the cut will be. Note that cuts generated by lower
  // values are not necessarily worse than cuts generated by larger values;
  // there is no strict dominance relationship.
  //
  // Setting this to 2 results in the "strong fractional rounding" of Letchford
  // and Lodi.
  optional int32 max_integer_rounding_scaling = 119 [default = 600];

  // If true, we start with an empty LP, and only add constraints not satisfied
  // by the current LP solution batch by batch. A constraint that is only added
  // like this is known as a "lazy" constraint in the literature, except that we
  // currently consider all constraints as lazy here.
  optional bool add_lp_constraints_lazily = 112 [default = true];

  // While adding constraints, skip the constraints which have orthogonality
  // less than 'min_orthogonality_for_lp_constraints' with already added
  // constraints during the current call. Orthogonality is defined as 1 -
  // cosine(vector angle between constraints). A value of zero disables this
  // feature.
  optional double min_orthogonality_for_lp_constraints = 115 [default = 0.05];

  // Max number of times we perform cut generation and resolve the LP at
  // level 0.
  optional int32 max_cut_rounds_at_level_zero = 154 [default = 1];

  // If a constraint/cut in the LP is not active for that many consecutive
  // OPTIMAL solves, remove it from the LP. Note that it might be added again
  // later if it becomes violated by the current LP solution.
  optional int32 max_consecutive_inactive_count = 121 [default = 100];

  // These parameters are similar to the sat clause management activity
  // parameters. They are effective only if the number of generated cuts
  // exceeds the storage limit. Default values are based on a few experiments
  // on miplib instances.
  optional double cut_max_active_count_value = 155 [default = 1e10];
  optional double cut_active_count_decay = 156 [default = 0.8];

  // Target number of constraints to remove during cleanup.
  optional int32 cut_cleanup_target = 157 [default = 1000];

  // Add that many lazy constraints (or cuts) at once in the LP. Note that at
  // the beginning of the solve, we do add more than this.
  optional int32 new_constraints_batch_size = 122 [default = 50];
735
  // The search branching will be used to decide how to branch on unfixed nodes.
  enum SearchBranching {
    // Try to fix all literals using the underlying SAT solver's heuristics,
    // then generate and fix literals until the integer variables are fixed.
    AUTOMATIC_SEARCH = 0;

    // If used, then all decisions taken by the solver are made using a fixed
    // order as specified in the API or in the CpModelProto search_strategy
    // field.
    FIXED_SEARCH = 1;

    // If used, the solver will use various generic heuristics in turn.
    PORTFOLIO_SEARCH = 2;

    // If used, the solver will use heuristics from the LP relaxation. This
    // exploits the reduced costs of the variables in the relaxation.
    //
    // TODO(user): Maybe rename to REDUCED_COST_SEARCH?
    LP_SEARCH = 3;

    // If used, the solver uses the pseudo costs for branching. Pseudo costs
    // are computed using the historical change in objective bounds when some
    // decision is taken.
    PSEUDO_COST_SEARCH = 4;

    // Mainly exposed here for testing. This quickly tries a lot of randomized
    // heuristics with a low conflict limit. It usually provides a good first
    // solution.
    PORTFOLIO_WITH_QUICK_RESTART_SEARCH = 5;

    // Mainly used internally. This is like FIXED_SEARCH, except we follow the
    // solution_hint field of the CpModelProto rather than using the information
    // provided in the search_strategy.
    HINT_SEARCH = 6;
  }
  optional SearchBranching search_branching = 82 [default = AUTOMATIC_SEARCH];

  // Conflict limit used in the phase that exploits the solution hint.
  optional int32 hint_conflict_limit = 153 [default = 10];

  // If true, the solver tries to repair the solution given in the hint. This
  // search terminates after the 'hint_conflict_limit' is reached and the solver
  // switches to regular search. If false, then we do a FIXED_SEARCH using the
  // hint until the hint_conflict_limit is reached.
  optional bool repair_hint = 167 [default = false];

  // If true, variables appearing in the solution hints will be fixed to their
  // hinted value.
  optional bool fix_variables_to_their_hinted_value = 192 [default = false];
785
  // All the "exploit_*" parameters below work in the same way: when branching
  // on an IntegerVariable, these parameters affect the value the variable is
  // branched on. Currently the first heuristic that triggers wins, in the order
  // in which they appear below.
  //
  // TODO(user): Maybe do like for the restart algorithm: introduce an enum
  // and a repeated field that control the order in which these are applied?

  // If true and the LP relaxation of the problem has an integer optimal
  // solution, try to exploit it. Note that since the LP relaxation may not
  // contain all the constraints, such a solution is not necessarily a solution
  // of the full problem.
  optional bool exploit_integer_lp_solution = 94 [default = true];

  // If true and the LP relaxation of the problem has a solution, try to exploit
  // it. This is the same as above except in this case the LP solution might
  // not be an integer solution.
  optional bool exploit_all_lp_solution = 116 [default = true];

  // When branching on a variable, follow the last best solution value.
  optional bool exploit_best_solution = 130 [default = false];

  // When branching on a variable, follow the last best relaxation solution
  // value. We use the relaxation with the tightest bound on the objective as
  // the best relaxation solution.
  optional bool exploit_relaxation_solution = 161 [default = false];

  // When branching on a variable that directly affects the objective,
  // branch on the value that leads to the best objective first.
  optional bool exploit_objective = 131 [default = true];

  // If set at zero (the default), it is disabled. Otherwise the solver attempts
  // probing at every 'probing_period' root node. A period of 1 enables probing
  // at every root node.
  optional int64 probing_period_at_root = 142 [default = 0];

  // If true, the search will continuously probe Boolean variables and integer
  // variable bounds.
  optional bool use_probing_search = 176 [default = false];

  // The solver ignores the pseudo costs of variables with a number of
  // recordings less than this threshold.
  optional int64 pseudo_cost_reliability_threshold = 123 [default = 100];
829
  // The default optimization method is a simple "linear scan", each time trying
  // to find a better solution than the previous one. If this is true, then we
  // use a core-based approach (like in max-SAT) when we try to increase the
  // lower bound instead.
  optional bool optimize_with_core = 83 [default = false];

  // Do a more conventional tree search (by opposition to the SAT-based one)
  // where we keep all the explored nodes in a tree. This is meant to be used
  // in a portfolio and focuses on improving the objective lower bound. Keeping
  // the whole tree allows us to report a better objective lower bound coming
  // from the worst open node in the tree.
  optional bool optimize_with_lb_tree_search = 188 [default = false];

  // If non-negative, perform a binary search on the objective variable in order
  // to find a [min, max] interval outside of which the solver proved unsat/sat
  // under this amount of conflicts. This can quickly reduce the objective
  // domain on some problems.
  optional int32 binary_search_num_conflicts = 99 [default = -1];

  // This has no effect if optimize_with_core is false. If true, use a different
  // core-based algorithm similar to the max-HS algo for max-SAT. This is a
  // hybrid MIP/CP approach and it uses a MIP solver in addition to the CP/SAT
  // one. This is also related to the PhD work of tobyodavies@
  // "Automatic Logic-Based Benders Decomposition with MiniZinc"
  // http://aaai.org/ocs/index.php/AAAI/AAAI17/paper/view/14489
  optional bool optimize_with_max_hs = 85 [default = false];

  // Whether we enumerate all solutions of a problem without objective. Note
  // that setting this to true automatically disables some presolve reductions
  // that can remove feasible solutions. That is, it has the same effect as
  // setting keep_all_feasible_solutions_in_presolve.
  //
  // TODO(user): Do not do that and let the user choose what behavior is best by
  // setting keep_all_feasible_solutions_in_presolve?
  optional bool enumerate_all_solutions = 87 [default = false];

  // If true, we disable the presolve reductions that remove feasible solutions
  // from the search space. Such solutions are usually dominated by a "better"
  // solution that is kept, but depending on the situation, we might want to
  // keep all solutions.
  //
  // A trivial example is when a variable is unused. If this is true, then the
  // presolve will not fix it to an arbitrary value and it will stay in the
  // search space.
  optional bool keep_all_feasible_solutions_in_presolve = 173 [default = false];

  // If true, add information about the derived variable domains to the
  // CpSolverResponse. It is an option because it makes the response slightly
  // bigger and there is a bit more work involved during the postsolve to
  // construct it, but it should still have a low overhead. See the
  // tightened_variables field in CpSolverResponse for more details.
  optional bool fill_tightened_domains_in_response = 132 [default = false];

  // If true, the final response additional_solutions field will be filled with
  // all solutions from our solutions pool.
  //
  // Note that if both this field and enumerate_all_solutions are true, we will
  // copy to the pool all of the solutions found. So if solution_pool_size is
  // big enough, you can get all solutions this way instead of using the
  // solution callback.
  //
  // Note that this only affects the "final" solutions, not the ones passed to
  // the solution callbacks.
  optional bool fill_additional_solutions_in_response = 194 [default = false];

  // If true, the solver will add a default integer branching strategy to the
  // already defined search strategy. If not, some variables might still not be
  // fixed at the end of the search. For now we assume these variables can just
  // be set to their lower bound.
  optional bool instantiate_all_variables = 106 [default = true];

  // If true, then the precedences propagator tries to detect for each variable
  // if it has a set of "optional incoming arcs" for which at least one of them
  // is present. This is usually useful to have but can be slow on models with
  // a lot of precedences.
  optional bool auto_detect_greater_than_at_least_one_of = 95 [default = true];
906
  // For an optimization problem, stop the solver as soon as we have a solution.
  optional bool stop_after_first_solution = 98 [default = false];

  // Mainly used when improving the presolver. When true, stops the solver after
  // the presolve is complete.
  optional bool stop_after_presolve = 149 [default = false];

  // Specify the number of parallel workers to use during search.
  // A value of 0 means the solver will try to use all cores on the machine.
  // A value of 1 means no parallelism.
  // As of 2020-04-10, if you're using SAT via MPSolver (to solve integer
  // programs) this field is overridden with a value of 8, if the field is not
  // set *explicitly*. Thus, always set this field explicitly or via
  // MPSolver::SetNumThreads().
  optional int32 num_search_workers = 100 [default = 0];

  // Experimental. If this is true, then we interleave all our major search
  // strategies and distribute the work amongst num_search_workers.
  //
  // The search is deterministic (independently of num_search_workers!), and we
  // schedule and wait for interleave_batch_size tasks to be completed before
  // synchronizing and scheduling the next batch of tasks.
  optional bool interleave_search = 136 [default = false];
  optional int32 interleave_batch_size = 134 [default = 1];

  // Temporary parameter until the memory usage is more optimized.
  optional bool reduce_memory_usage_in_interleave_mode = 141 [default = false];

  // Allows objective sharing between workers.
  optional bool share_objective_bounds = 113 [default = true];

  // Allows sharing of the bounds of modified variables at level 0.
  optional bool share_level_zero_bounds = 114 [default = true];

  // LNS parameters.
  optional bool use_lns_only = 101 [default = false];

  // Size of the top-n different solutions kept by the solver.
  // Currently this only impacts the "base" solution chosen for an LNS
  // fragment.
  optional int32 solution_pool_size = 193 [default = 3];

  // Turns on the relaxation induced neighborhood generator.
  optional bool use_rins_lns = 129 [default = true];

  // Adds a feasibility pump subsolver along with the lns subsolvers.
  optional bool use_feasibility_pump = 164 [default = true];
953
  // Rounding method to use for the feasibility pump.
  enum FPRoundingMethod {
    // Rounds to the nearest integer value.
    NEAREST_INTEGER = 0;

    // Counts the number of linear constraints restricting the variable in the
    // increasing values (up locks) and decreasing values (down locks). Rounds
    // the variable in the direction of fewer locks.
    LOCK_BASED = 1;

    // Similar to lock based rounding except this only considers locks of active
    // constraints from the last lp solve.
    ACTIVE_LOCK_BASED = 3;

    // This is an expensive rounding algorithm. We round variables one by one
    // and propagate the bounds in between. If none of the rounded values fall
    // in the continuous domain specified by the lower and upper bound, we use
    // the current lower/upper bound (whichever one is closest) instead of
    // rounding the fractional lp solution value. If both the rounded values
    // are in the domain, we round to the nearest integer.
    PROPAGATION_ASSISTED = 2;
  }
  optional FPRoundingMethod fp_rounding = 165 [default = PROPAGATION_ASSISTED];

  // Turns on an lns worker which solves a relaxed version of the original
  // problem by removing constraints from the problem in order to get better
  // bounds.
  optional bool use_relaxation_lns = 150 [default = false];

  // If true, registers more lns subsolvers with different parameters.
  optional bool diversify_lns_params = 137 [default = false];

  // Randomize fixed search.
  optional bool randomize_search = 103 [default = false];

  // Search randomization will collect equivalent 'max valued' variables, and
  // pick one randomly. For instance, if the variable strategy is CHOOSE_FIRST,
  // all unassigned variables are equivalent. If the variable strategy is
  // CHOOSE_LOWEST_MIN, and `lm` is the current lowest min of all unassigned
  // variables, then the set of max valued variables will be all unassigned
  // variables where
  //    lm <= variable min <= lm + search_randomization_tolerance
  optional int64 search_randomization_tolerance = 104 [default = 0];
996
  // If true, we automatically detect variables whose constraints are always
  // enforced by the same literal and we mark them as optional. This allows
  // propagating them as if they were present in some situations.
  optional bool use_optional_variables = 108 [default = true];

  // The solver usually exploits the LP relaxation of a model. If this option is
  // true, then whatever is inferred by the LP will be used like a heuristic to
  // compute EXACT propagation on the IP. So with this option, there are no
  // numerical imprecision issues.
  optional bool use_exact_lp_reason = 109 [default = true];

  // If true, the solver attempts to generate more info inside the lp propagator
  // by branching on some variables if certain criteria are met during the
  // search tree exploration.
  optional bool use_branching_in_lp = 139 [default = false];

  // This can be beneficial if there are a lot of no-overlap constraints but a
  // relatively low number of different intervals in the problem. Like 1000
  // intervals, but 1M intervals in the no-overlap constraints covering them.
  optional bool use_combined_no_overlap = 133 [default = false];

  // Indicates if the CP-SAT layer should catch Control-C (SIGINT) signals
  // when calling solve. If set, catching the SIGINT signal will terminate the
  // search gracefully, as if a time limit was reached.
  optional bool catch_sigint_signal = 135 [default = true];

  // Stores and exploits "implied-bounds" in the solver. That is, relations of
  // the form literal => (var >= bound). This is currently used to derive
  // stronger cuts.
  optional bool use_implied_bounds = 144 [default = true];

  // Whether we try to do a few degenerate iterations at the end of an LP solve
  // to minimize the fractionality of the integer variables in the basis. This
  // helps on some problems, but not so much on others. It also costs a bit of
  // time to do such a polish step.
  optional bool polish_lp_solution = 175 [default = false];

  // Temporary flag until the feature is more mature. This converts intervals
  // to the newer proto format that supports affine start/var/end instead of
  // just variables.
  optional bool convert_intervals = 177 [default = true];

  // Whether we try to automatically detect the symmetries in a model and
  // exploit them. Currently, at level 1 we detect them in presolve and try
  // to fix Booleans. At level 2, we also do some form of dynamic symmetry
  // breaking during search.
  optional int32 symmetry_level = 183 [default = 2];
1044
  // ==========================================================================
  // MIP -> CP-SAT (i.e. IP with integer coeff) conversion parameters that are
  // used by our automatic "scaling" algorithm.
  //
  // Note that it is hard to do a meaningful conversion automatically and if
  // you have a model with continuous variables, it is best if you scale the
  // domain of the variables yourself so that you have a relevant precision for
  // the application at hand. Same for the coefficients and constraint bounds.
  // ==========================================================================

  // We need to bound the maximum magnitude of the variables for CP-SAT, and
  // that is the bound we use. If the MIP model expects larger variable values
  // in the solution, then the converted model will likely not be relevant.
  optional double mip_max_bound = 124 [default = 1e7];

  // All continuous variables of the problem will be multiplied by this factor.
  // By default, we don't do any variable scaling and rely on the MIP model to
  // specify continuous variable domains with the wanted precision.
  optional double mip_var_scaling = 125 [default = 1.0];

  // If true, some continuous variables might be automatically scaled. For now,
  // this is only the case where we detect that a variable is actually an
  // integer multiple of a constant. For instance, variables of the form k * 0.5
  // are quite frequent, and if we detect this, we will scale such a variable's
  // domain by 2 to make it implied integer.
  optional bool mip_automatically_scale_variables = 166 [default = true];

  // When scaling constraints with double coefficients to integer coefficients,
  // we will multiply by a power of 2 and round the coefficients. We will choose
  // the lowest power such that we have no potential overflow and the worst case
  // constraint activity error does not exceed this threshold relative to the
  // constraint bounds.
  //
  // We also use this to decide by how much we relax the constraint bounds so
  // that we can have a feasible integer solution of constraints involving
  // continuous variables. This is required for instance when you have an == rhs
  // constraint, as in many situations you cannot have a perfect equality with
  // integer variables and coefficients.
  optional double mip_wanted_precision = 126 [default = 1e-6];

  // To avoid integer overflow, we always force the maximum possible constraint
  // activity (and objective value) according to the initial variable domain to
  // be smaller than 2 to this given power. Because of this, we cannot always
  // reach the "mip_wanted_precision" parameter above.
  //
  // This can go as high as 62, but some internal algos currently abort early
  // if they might run into integer overflow, so it is better to keep it a bit
  // lower than this.
  optional int32 mip_max_activity_exponent = 127 [default = 53];

  // As explained in mip_wanted_precision and mip_max_activity_exponent, we
  // cannot always reach the wanted precision during scaling. We use this
  // threshold to emphasize in the logs when the precision seems bad.
  optional double mip_check_precision = 128 [default = 1e-4];

  // Even if we make a big error when scaling the objective, we can always
  // derive a correct lower bound on the original objective by using the exact
  // lower bound on the scaled integer version of the objective. This should be
  // fast, but if you don't care about having a precise lower bound, you can
  // turn it off.
  optional bool mip_compute_true_objective_bound = 198 [default = true];

  // Any finite value in the input MIP must be below this threshold, otherwise
  // the model will be reported invalid. This is needed to avoid floating point
  // overflow when evaluating bounds * coeff for instance. We are a bit more
  // defensive, but in practice, users shouldn't use super large values in a
  // MIP.
  optional double mip_max_valid_magnitude = 199 [default = 1e30];
1113}
1114