1 // Copyright 2010-2021 Google LLC
2 // Licensed under the Apache License, Version 2.0 (the "License");
3 // you may not use this file except in compliance with the License.
4 // You may obtain a copy of the License at
5 //
6 //     http://www.apache.org/licenses/LICENSE-2.0
7 //
8 // Unless required by applicable law or agreed to in writing, software
9 // distributed under the License is distributed on an "AS IS" BASIS,
10 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 // See the License for the specific language governing permissions and
12 // limitations under the License.
13 
14 #include "ortools/sat/precedences.h"
15 
16 #include <algorithm>
17 #include <memory>
18 
19 #include "ortools/base/cleanup.h"
20 #include "ortools/base/logging.h"
21 #include "ortools/base/stl_util.h"
22 #include "ortools/base/strong_vector.h"
23 #include "ortools/sat/clause.h"
24 #include "ortools/sat/cp_constraints.h"
25 
26 namespace operations_research {
27 namespace sat {
28 
29 namespace {
30 
AppendLowerBoundReasonIfValid(IntegerVariable var,const IntegerTrail & i_trail,std::vector<IntegerLiteral> * reason)31 void AppendLowerBoundReasonIfValid(IntegerVariable var,
32                                    const IntegerTrail& i_trail,
33                                    std::vector<IntegerLiteral>* reason) {
34   if (var != kNoIntegerVariable) {
35     reason->push_back(i_trail.LowerBoundAsLiteral(var));
36   }
37 }
38 
39 }  // namespace
40 
Propagate(Trail * trail)41 bool PrecedencesPropagator::Propagate(Trail* trail) { return Propagate(); }
42 
bool PrecedencesPropagator::Propagate() {
  // Process the literals newly assigned on the trail. Each literal may
  // complete the presence condition of some conditional arcs, which then
  // become part of the graph (impacted_arcs_).
  while (propagation_trail_index_ < trail_->Index()) {
    const Literal literal = (*trail_)[propagation_trail_index_++];
    if (literal.Index() >= literal_to_new_impacted_arcs_.size()) continue;

    // IMPORTANT: Because of the way Untrail() work, we need to add all the
    // potential arcs before we can abort. It is why we iterate twice here.
    for (const ArcIndex arc_index :
         literal_to_new_impacted_arcs_[literal.Index()]) {
      // arc_counts_ tracks how many presence literals are still unassigned;
      // zero means all are true and the arc is now present.
      if (--arc_counts_[arc_index] == 0) {
        const ArcInfo& arc = arcs_[arc_index];
        impacted_arcs_[arc.tail_var].push_back(arc_index);
      }
    }

    // Iterate again to check for a propagation and indirectly update
    // modified_vars_.
    for (const ArcIndex arc_index :
         literal_to_new_impacted_arcs_[literal.Index()]) {
      if (arc_counts_[arc_index] > 0) continue;
      const ArcInfo& arc = arcs_[arc_index];
      if (integer_trail_->IsCurrentlyIgnored(arc.head_var)) continue;
      const IntegerValue new_head_lb =
          integer_trail_->LowerBound(arc.tail_var) + ArcOffset(arc);
      if (new_head_lb > integer_trail_->LowerBound(arc.head_var)) {
        if (!EnqueueAndCheck(arc, new_head_lb, trail_)) return false;
      }
    }
  }

  // Do the actual propagation of the IntegerVariable bounds.
  InitializeBFQueueWithModifiedNodes();
  if (!BellmanFordTarjan(trail_)) return false;

  // We can only test that no propagation is left if we didn't enqueue new
  // literal in the presence of optional variables.
  //
  // TODO(user): Because of our code to deal with InPropagationLoop(), this is
  // not always true. Find a cleaner way to DCHECK() while not failing in this
  // corner case.
  if (/*DISABLES CODE*/ (false) &&
      propagation_trail_index_ == trail_->Index()) {
    DCHECK(NoPropagationLeft(*trail_));
  }

  // Propagate the presence literals of the arcs that can't be added.
  PropagateOptionalArcs(trail_);

  // Clean-up modified_vars_ to do as little as possible on the next call.
  modified_vars_.ClearAndResize(integer_trail_->NumIntegerVariables());
  return true;
}
95 
PropagateOutgoingArcs(IntegerVariable var)96 bool PrecedencesPropagator::PropagateOutgoingArcs(IntegerVariable var) {
97   CHECK_NE(var, kNoIntegerVariable);
98   if (var >= impacted_arcs_.size()) return true;
99   for (const ArcIndex arc_index : impacted_arcs_[var]) {
100     const ArcInfo& arc = arcs_[arc_index];
101     if (integer_trail_->IsCurrentlyIgnored(arc.head_var)) continue;
102     const IntegerValue new_head_lb =
103         integer_trail_->LowerBound(arc.tail_var) + ArcOffset(arc);
104     if (new_head_lb > integer_trail_->LowerBound(arc.head_var)) {
105       if (!EnqueueAndCheck(arc, new_head_lb, trail_)) return false;
106     }
107   }
108   return true;
109 }
110 
// Exactly reverses the arc-activation bookkeeping done by Propagate() for the
// trail entries above trail_index: arc counts are incremented back, and arcs
// that had just become present are popped from their tail's impacted list
// (valid because we undo in the reverse order of the additions).
void PrecedencesPropagator::Untrail(const Trail& trail, int trail_index) {
  if (propagation_trail_index_ > trail_index) {
    // This means that we already propagated all there is to propagate
    // at the level trail_index, so we can safely clear modified_vars_ in case
    // it wasn't already done.
    modified_vars_.ClearAndResize(integer_trail_->NumIntegerVariables());
  }
  while (propagation_trail_index_ > trail_index) {
    const Literal literal = trail[--propagation_trail_index_];
    if (literal.Index() >= literal_to_new_impacted_arcs_.size()) continue;
    for (const ArcIndex arc_index :
         literal_to_new_impacted_arcs_[literal.Index()]) {
      // A count going from 0 back to 1 means this arc was present; remove it.
      if (arc_counts_[arc_index]++ == 0) {
        const ArcInfo& arc = arcs_[arc_index];
        impacted_arcs_[arc.tail_var].pop_back();
      }
    }
  }
}
130 
131 // Instead of simply sorting the IntegerPrecedences returned by .var,
132 // experiments showed that it is faster to regroup all the same .var "by hand"
133 // by first computing how many times they appear and then apply the sorting
134 // permutation.
void PrecedencesPropagator::ComputePrecedences(
    const std::vector<IntegerVariable>& vars,
    std::vector<IntegerPrecedences>* output) {
  // Collect, for each var in vars, the "is before" relations implied by the
  // present outgoing arcs with a non-negative offset.
  tmp_sorted_vars_.clear();
  tmp_precedences_.clear();
  for (int index = 0; index < vars.size(); ++index) {
    const IntegerVariable var = vars[index];
    CHECK_NE(kNoIntegerVariable, var);
    if (var >= impacted_arcs_.size()) continue;
    for (const ArcIndex arc_index : impacted_arcs_[var]) {
      const ArcInfo& arc = arcs_[arc_index];
      if (integer_trail_->IsCurrentlyIgnored(arc.head_var)) continue;

      IntegerValue offset = arc.offset;
      if (arc.offset_var != kNoIntegerVariable) {
        offset += integer_trail_->LowerBound(arc.offset_var);
      }

      // TODO(user): it seems better to ignore negative min offset as we will
      // often have relation of the form interval_start >= interval_end -
      // offset, and such relation are usually not useful. Revisit this in case
      // we see problems where we can propagate more without this test.
      if (offset < 0) continue;

      if (var_to_degree_[arc.head_var] == 0) {
        // First time we see this head: remember it for the sorting pass below.
        tmp_sorted_vars_.push_back(
            {arc.head_var, integer_trail_->LowerBound(arc.head_var)});
      } else {
        // This "seen" mechanism is needed because we may have multi-arc and we
        // don't want any duplicates in the "is_before" relation. Note that it
        // works because var_to_last_index_ is reset by the var_to_degree_ == 0
        // case.
        if (var_to_last_index_[arc.head_var] == index) continue;
      }
      var_to_last_index_[arc.head_var] = index;
      var_to_degree_[arc.head_var]++;
      tmp_precedences_.push_back(
          {index, arc.head_var, arc_index.value(), offset});
    }
  }

  // This order is a topological order for the precedences relation order
  // provided that all the offset between the involved IntegerVariable are
  // positive.
  //
  // TODO(user): use an order that is always topological? This is not clear
  // since it may be slower to compute and not worth it because the order below
  // is more natural and may work better.
  std::sort(tmp_sorted_vars_.begin(), tmp_sorted_vars_.end());

  // Permute tmp_precedences_ into the output to put it in the correct order.
  // For that we transform var_to_degree_ to point to the first position of
  // each lbvar in the output vector.
  int start = 0;
  for (const SortedVar pair : tmp_sorted_vars_) {
    const int degree = var_to_degree_[pair.var];
    if (degree > 1) {
      var_to_degree_[pair.var] = start;
      start += degree;
    } else {
      // Optimization: we remove degree one relations.
      var_to_degree_[pair.var] = -1;
    }
  }
  output->resize(start);
  for (const IntegerPrecedences& precedence : tmp_precedences_) {
    // -1 marks a degree-one head that was dropped above.
    if (var_to_degree_[precedence.var] < 0) continue;
    (*output)[var_to_degree_[precedence.var]++] = precedence;
  }

  // Cleanup var_to_degree_, note that we don't need to clean
  // var_to_last_index_.
  for (const SortedVar pair : tmp_sorted_vars_) {
    var_to_degree_[pair.var] = 0;
  }
}
211 
AddPrecedenceReason(int arc_index,IntegerValue min_offset,std::vector<Literal> * literal_reason,std::vector<IntegerLiteral> * integer_reason) const212 void PrecedencesPropagator::AddPrecedenceReason(
213     int arc_index, IntegerValue min_offset,
214     std::vector<Literal>* literal_reason,
215     std::vector<IntegerLiteral>* integer_reason) const {
216   const ArcInfo& arc = arcs_[ArcIndex(arc_index)];
217   for (const Literal l : arc.presence_literals) {
218     literal_reason->push_back(l.Negated());
219   }
220   if (arc.offset_var != kNoIntegerVariable) {
221     // Reason for ArcOffset(arc) to be >= min_offset.
222     integer_reason->push_back(IntegerLiteral::GreaterOrEqual(
223         arc.offset_var, min_offset - arc.offset));
224   }
225 }
226 
AdjustSizeFor(IntegerVariable i)227 void PrecedencesPropagator::AdjustSizeFor(IntegerVariable i) {
228   const int index = std::max(i.value(), NegationOf(i).value());
229   if (index >= impacted_arcs_.size()) {
230     // TODO(user): only watch lower bound of the relevant variable instead
231     // of watching everything in [0, max_index_of_variable_used_in_this_class).
232     for (IntegerVariable var(impacted_arcs_.size()); var <= index; ++var) {
233       watcher_->WatchLowerBound(var, watcher_id_);
234     }
235     impacted_arcs_.resize(index + 1);
236     impacted_potential_arcs_.resize(index + 1);
237     var_to_degree_.resize(index + 1);
238     var_to_last_index_.resize(index + 1);
239   }
240 }
241 
// Registers the relation tail + offset (+ offset_var) <= head, conditioned on
// all presence_literals being true. Must be called at decision level 0. Also
// registers the equivalent derived arcs (negated / symmetric forms) so that
// propagation works in both directions.
void PrecedencesPropagator::AddArc(
    IntegerVariable tail, IntegerVariable head, IntegerValue offset,
    IntegerVariable offset_var, absl::Span<const Literal> presence_literals) {
  DCHECK_EQ(trail_->CurrentDecisionLevel(), 0);
  AdjustSizeFor(tail);
  AdjustSizeFor(head);
  if (offset_var != kNoIntegerVariable) AdjustSizeFor(offset_var);

  // This arc is present iff all the literals here are true.
  absl::InlinedVector<Literal, 6> enforcement_literals;
  {
    for (const Literal l : presence_literals) {
      enforcement_literals.push_back(l);
    }
    // Optional variables contribute their own presence condition.
    if (integer_trail_->IsOptional(tail)) {
      enforcement_literals.push_back(
          integer_trail_->IsIgnoredLiteral(tail).Negated());
    }
    if (integer_trail_->IsOptional(head)) {
      enforcement_literals.push_back(
          integer_trail_->IsIgnoredLiteral(head).Negated());
    }
    if (offset_var != kNoIntegerVariable &&
        integer_trail_->IsOptional(offset_var)) {
      enforcement_literals.push_back(
          integer_trail_->IsIgnoredLiteral(offset_var).Negated());
    }
    gtl::STLSortAndRemoveDuplicates(&enforcement_literals);
    // Simplify using the level-zero assignment: drop literals already true,
    // and drop the whole arc if any literal is already false.
    int new_size = 0;
    for (const Literal l : enforcement_literals) {
      if (trail_->Assignment().LiteralIsTrue(Literal(l))) {
        continue;  // At true, ignore this literal.
      } else if (trail_->Assignment().LiteralIsFalse(Literal(l))) {
        return;  // At false, ignore completely this arc.
      }
      enforcement_literals[new_size++] = l;
    }
    enforcement_literals.resize(new_size);
  }

  if (head == tail) {
    // A self-arc is either plain SAT or plain UNSAT or it forces something on
    // the given offset_var or presence_literal_index. In any case it could be
    // presolved in something more efficient.
    VLOG(1) << "Self arc! This could be presolved. "
            << "var:" << tail << " offset:" << offset
            << " offset_var:" << offset_var
            << " conditioned_by:" << presence_literals;
  }

  // Remove the offset_var if it is fixed.
  // TODO(user): We should also handle the case where tail or head is fixed.
  if (offset_var != kNoIntegerVariable) {
    const IntegerValue lb = integer_trail_->LowerBound(offset_var);
    if (lb == integer_trail_->UpperBound(offset_var)) {
      offset += lb;
      offset_var = kNoIntegerVariable;
    }
  }

  // Deal first with impacted_potential_arcs_/potential_arcs_.
  if (!enforcement_literals.empty()) {
    const OptionalArcIndex arc_index(potential_arcs_.size());
    potential_arcs_.push_back(
        {tail, head, offset, offset_var, enforcement_literals});
    // Index the potential arc under every variable whose bound change can
    // make it infeasible (see PropagateOptionalArcs()).
    impacted_potential_arcs_[tail].push_back(arc_index);
    impacted_potential_arcs_[NegationOf(head)].push_back(arc_index);
    if (offset_var != kNoIntegerVariable) {
      impacted_potential_arcs_[offset_var].push_back(arc_index);
    }
  }

  // Now deal with impacted_arcs_/arcs_.
  struct InternalArc {
    IntegerVariable tail_var;
    IntegerVariable head_var;
    IntegerVariable offset_var;
  };
  // All equivalent forms of the relation, one per variable that can act as
  // the propagation "tail".
  std::vector<InternalArc> to_add;
  if (offset_var == kNoIntegerVariable) {
    // a + offset <= b and -b + offset <= -a
    to_add.push_back({tail, head, kNoIntegerVariable});
    to_add.push_back({NegationOf(head), NegationOf(tail), kNoIntegerVariable});
  } else {
    // tail (a) and offset_var (b) are symmetric, so we add:
    // - a + b + offset <= c
    to_add.push_back({tail, head, offset_var});
    to_add.push_back({offset_var, head, tail});
    // - a - c + offset <= -b
    to_add.push_back({tail, NegationOf(offset_var), NegationOf(head)});
    to_add.push_back({NegationOf(head), NegationOf(offset_var), tail});
    // - b - c + offset <= -a
    to_add.push_back({offset_var, NegationOf(tail), NegationOf(head)});
    to_add.push_back({NegationOf(head), NegationOf(tail), offset_var});
  }
  for (const InternalArc a : to_add) {
    // Since we add a new arc, we will need to consider its tail during the next
    // propagation. Note that the size of modified_vars_ will be automatically
    // updated when new integer variables are created since we register it with
    // IntegerTrail in this class constructor.
    //
    // TODO(user): Adding arcs and then calling Untrail() before Propagate()
    // will cause this mecanism to break. Find a more robust implementation.
    //
    // TODO(user): In some rare corner case, rescanning the whole list of arc
    // leaving tail_var can make AddVar() have a quadratic complexity where it
    // shouldn't. A better solution would be to see if this new arc currently
    // propagate something, and if it does, just update the lower bound of
    // a.head_var and let the normal "is modified" mecanism handle any eventual
    // follow up propagations.
    modified_vars_.Set(a.tail_var);

    // If a.head_var is optional, we can potentially remove some literal from
    // enforcement_literals.
    const ArcIndex arc_index(arcs_.size());
    arcs_.push_back(
        {a.tail_var, a.head_var, offset, a.offset_var, enforcement_literals});
    // NOTE: reference into the element just pushed; edited in place below.
    auto& presence_literals = arcs_.back().presence_literals;
    if (integer_trail_->IsOptional(a.head_var)) {
      // TODO(user): More generally, we can remove any literal that is implied
      // by to_remove.
      const Literal to_remove =
          integer_trail_->IsIgnoredLiteral(a.head_var).Negated();
      const auto it = std::find(presence_literals.begin(),
                                presence_literals.end(), to_remove);
      if (it != presence_literals.end()) presence_literals.erase(it);
    }

    if (presence_literals.empty()) {
      // Unconditional arc: present right away.
      impacted_arcs_[a.tail_var].push_back(arc_index);
    } else {
      // Conditional arc: it becomes present when all its literals are true;
      // Propagate() tracks that via arc_counts_.
      for (const Literal l : presence_literals) {
        if (l.Index() >= literal_to_new_impacted_arcs_.size()) {
          literal_to_new_impacted_arcs_.resize(l.Index().value() + 1);
        }
        literal_to_new_impacted_arcs_[l.Index()].push_back(arc_index);
      }
    }
    arc_counts_.push_back(presence_literals.size());
  }
}
383 
384 // TODO(user): On jobshop problems with a lot of tasks per machine (500), this
385 // takes up a big chunck of the running time even before we find a solution.
386 // This is because, for each lower bound changed, we inspect 500 arcs even
387 // though they will never be propagated because the other bound is still at the
388 // horizon. Find an even sparser algorithm?
void PrecedencesPropagator::PropagateOptionalArcs(Trail* trail) {
  for (const IntegerVariable var : modified_vars_.PositionsSetAtLeastOnce()) {
    // The variables are not in increasing order, so we need to continue.
    if (var >= impacted_potential_arcs_.size()) continue;

    // Note that we can currently check the same ArcInfo up to 3 times, one for
    // each of the arc variables: tail, NegationOf(head) and offset_var.
    for (const OptionalArcIndex arc_index : impacted_potential_arcs_[var]) {
      const ArcInfo& arc = potential_arcs_[arc_index];
      // We can only propagate when exactly one presence literal is still
      // unassigned: all the others (being true) go into the reason.
      int num_not_true = 0;
      Literal to_propagate;
      for (const Literal l : arc.presence_literals) {
        if (!trail->Assignment().LiteralIsTrue(l)) {
          ++num_not_true;
          to_propagate = l;
        }
      }
      if (num_not_true != 1) continue;
      if (trail->Assignment().LiteralIsFalse(to_propagate)) continue;

      // Test if this arc can be present or not.
      // Important arc.tail_var can be different from var here.
      const IntegerValue tail_lb = integer_trail_->LowerBound(arc.tail_var);
      const IntegerValue head_ub = integer_trail_->UpperBound(arc.head_var);
      if (tail_lb + ArcOffset(arc) > head_ub) {
        // The arc cannot hold given the current bounds, so its last unassigned
        // presence literal must be false.
        integer_reason_.clear();
        integer_reason_.push_back(
            integer_trail_->LowerBoundAsLiteral(arc.tail_var));
        integer_reason_.push_back(
            integer_trail_->UpperBoundAsLiteral(arc.head_var));
        AppendLowerBoundReasonIfValid(arc.offset_var, *integer_trail_,
                                      &integer_reason_);
        literal_reason_.clear();
        for (const Literal l : arc.presence_literals) {
          if (l != to_propagate) literal_reason_.push_back(l.Negated());
        }
        integer_trail_->EnqueueLiteral(to_propagate.Negated(), literal_reason_,
                                       integer_reason_);
      }
    }
  }
}
431 
ArcOffset(const ArcInfo & arc) const432 IntegerValue PrecedencesPropagator::ArcOffset(const ArcInfo& arc) const {
433   return arc.offset + (arc.offset_var == kNoIntegerVariable
434                            ? IntegerValue(0)
435                            : integer_trail_->LowerBound(arc.offset_var));
436 }
437 
// Pushes new_head_lb as the lower bound of arc.head_var with a proper reason,
// handling the conflict (or optional-variable exclusion) cases ourselves for
// better explanations. Returns false on conflict.
bool PrecedencesPropagator::EnqueueAndCheck(const ArcInfo& arc,
                                            IntegerValue new_head_lb,
                                            Trail* trail) {
  DCHECK_GT(new_head_lb, integer_trail_->LowerBound(arc.head_var));

  // Compute the reason for new_head_lb.
  //
  // TODO(user): do like for clause and keep the negation of
  // arc.presence_literals? I think we could change the integer.h API to accept
  // true literal like for IntegerVariable, it is really confusing currently.
  literal_reason_.clear();
  for (const Literal l : arc.presence_literals) {
    literal_reason_.push_back(l.Negated());
  }

  integer_reason_.clear();
  integer_reason_.push_back(integer_trail_->LowerBoundAsLiteral(arc.tail_var));
  AppendLowerBoundReasonIfValid(arc.offset_var, *integer_trail_,
                                &integer_reason_);

  // The code works without this block since Enqueue() below can already take
  // care of conflicts. However, it is better to deal with the conflict
  // ourselves because we can be smarter about the reason this way.
  //
  // The reason for a "precedence" conflict is always a linear reason
  // involving the tail lower_bound, the head upper bound and eventually the
  // size lower bound. Because of that, we can use the RelaxLinearReason()
  // code.
  if (new_head_lb > integer_trail_->UpperBound(arc.head_var)) {
    // slack is by how much the bounds can be relaxed while keeping a conflict.
    const IntegerValue slack =
        new_head_lb - integer_trail_->UpperBound(arc.head_var) - 1;
    integer_reason_.push_back(
        integer_trail_->UpperBoundAsLiteral(arc.head_var));
    std::vector<IntegerValue> coeffs(integer_reason_.size(), IntegerValue(1));
    integer_trail_->RelaxLinearReason(slack, coeffs, &integer_reason_);

    if (!integer_trail_->IsOptional(arc.head_var)) {
      return integer_trail_->ReportConflict(literal_reason_, integer_reason_);
    } else {
      // For an optional head, the "conflict" instead means the head variable
      // must be ignored (its ignored-literal l must be true).
      CHECK(!integer_trail_->IsCurrentlyIgnored(arc.head_var));
      const Literal l = integer_trail_->IsIgnoredLiteral(arc.head_var);
      if (trail->Assignment().LiteralIsFalse(l)) {
        literal_reason_.push_back(l);
        return integer_trail_->ReportConflict(literal_reason_, integer_reason_);
      } else {
        integer_trail_->EnqueueLiteral(l, literal_reason_, integer_reason_);
        return true;
      }
    }
  }

  return integer_trail_->Enqueue(
      IntegerLiteral::GreaterOrEqual(arc.head_var, new_head_lb),
      literal_reason_, integer_reason_);
}
493 
NoPropagationLeft(const Trail & trail) const494 bool PrecedencesPropagator::NoPropagationLeft(const Trail& trail) const {
495   const int num_nodes = impacted_arcs_.size();
496   for (IntegerVariable var(0); var < num_nodes; ++var) {
497     for (const ArcIndex arc_index : impacted_arcs_[var]) {
498       const ArcInfo& arc = arcs_[arc_index];
499       if (integer_trail_->IsCurrentlyIgnored(arc.head_var)) continue;
500       if (integer_trail_->LowerBound(arc.tail_var) + ArcOffset(arc) >
501           integer_trail_->LowerBound(arc.head_var)) {
502         return false;
503       }
504     }
505   }
506   return true;
507 }
508 
// Resets the Bellman-Ford queue and seeds it with every node whose lower
// bound was modified since the last propagation.
void PrecedencesPropagator::InitializeBFQueueWithModifiedNodes() {
  // Sparse clear of the queue. TODO(user): only use the sparse version if
  // queue.size() is small or use SparseBitset.
  const int num_nodes = impacted_arcs_.size();
  bf_in_queue_.resize(num_nodes, false);
  for (const int node : bf_queue_) bf_in_queue_[node] = false;
  bf_queue_.clear();
  DCHECK(std::none_of(bf_in_queue_.begin(), bf_in_queue_.end(),
                      [](bool v) { return v; }));
  for (const IntegerVariable var : modified_vars_.PositionsSetAtLeastOnce()) {
    // Variables without any outgoing arc are not nodes of the graph.
    if (var >= num_nodes) continue;
    bf_queue_.push_back(var.value());
    bf_in_queue_[var.value()] = true;
  }
}
524 
// Restores the Bellman-Ford-Tarjan scratch state (arc marks, parent arcs and
// skip flags) so the next call starts clean.
void PrecedencesPropagator::CleanUpMarkedArcsAndParents() {
  // To be sparse, we use the fact that each node with a parent must be in
  // modified_vars_.
  const int num_nodes = impacted_arcs_.size();
  for (const IntegerVariable var : modified_vars_.PositionsSetAtLeastOnce()) {
    if (var >= num_nodes) continue;
    const ArcIndex parent_arc_index = bf_parent_arc_of_[var.value()];
    if (parent_arc_index != -1) {
      arcs_[parent_arc_index].is_marked = false;
      bf_parent_arc_of_[var.value()] = -1;
      bf_can_be_skipped_[var.value()] = false;
    }
  }
  // In debug mode, verify that everything really was reset.
  DCHECK(std::none_of(bf_parent_arc_of_.begin(), bf_parent_arc_of_.end(),
                      [](ArcIndex v) { return v != -1; }));
  DCHECK(std::none_of(bf_can_be_skipped_.begin(), bf_can_be_skipped_.end(),
                      [](bool v) { return v; }));
}
543 
// Unmarks (disassembles) the subtree of marked parent arcs rooted at `source`,
// flagging each visited node in `can_be_skipped`. Returns true iff `target`
// belongs to that subtree, which signals a positive cycle to the caller.
bool PrecedencesPropagator::DisassembleSubtree(
    int source, int target, std::vector<bool>* can_be_skipped) {
  // Note that we explore a tree, so we can do it in any order, and the one
  // below seems to be the fastest.
  tmp_vector_.clear();
  tmp_vector_.push_back(source);
  while (!tmp_vector_.empty()) {
    const int tail = tmp_vector_.back();
    tmp_vector_.pop_back();
    for (const ArcIndex arc_index : impacted_arcs_[IntegerVariable(tail)]) {
      const ArcInfo& arc = arcs_[arc_index];
      // Only marked arcs are part of the current shortest-path tree.
      if (arc.is_marked) {
        arc.is_marked = false;  // mutable.
        if (arc.head_var.value() == target) return true;
        DCHECK(!(*can_be_skipped)[arc.head_var.value()]);
        (*can_be_skipped)[arc.head_var.value()] = true;
        tmp_vector_.push_back(arc.head_var.value());
      }
    }
  }
  return false;
}
566 
// Reconstructs the positive cycle containing first_arc by following
// bf_parent_arc_of_[], and fills the conflict reason (presence literals and
// offset-variable bounds). Optional variables on the cycle are returned in
// must_be_all_true instead of being part of a hard conflict.
void PrecedencesPropagator::AnalyzePositiveCycle(
    ArcIndex first_arc, Trail* trail, std::vector<Literal>* must_be_all_true,
    std::vector<Literal>* literal_reason,
    std::vector<IntegerLiteral>* integer_reason) {
  must_be_all_true->clear();
  literal_reason->clear();
  integer_reason->clear();

  // Follow bf_parent_arc_of_[] to find the cycle containing first_arc.
  const IntegerVariable first_arc_head = arcs_[first_arc].head_var;
  ArcIndex arc_index = first_arc;
  std::vector<ArcIndex> arc_on_cycle;

  // Just to be safe and avoid an infinite loop we use the fact that the maximum
  // cycle size on a graph with n nodes is of size n. If we have more in the
  // code below, it means first_arc is not part of a cycle according to
  // bf_parent_arc_of_[], which should never happen.
  const int num_nodes = impacted_arcs_.size();
  while (arc_on_cycle.size() <= num_nodes) {
    arc_on_cycle.push_back(arc_index);
    const ArcInfo& arc = arcs_[arc_index];
    // The cycle is closed once we are back at the head of first_arc.
    if (arc.tail_var == first_arc_head) break;
    arc_index = bf_parent_arc_of_[arc.tail_var.value()];
    CHECK_NE(arc_index, ArcIndex(-1));
  }
  CHECK_NE(arc_on_cycle.size(), num_nodes + 1) << "Infinite loop.";

  // Compute the reason for this cycle.
  IntegerValue sum(0);
  for (const ArcIndex arc_index : arc_on_cycle) {
    const ArcInfo& arc = arcs_[arc_index];
    sum += ArcOffset(arc);
    AppendLowerBoundReasonIfValid(arc.offset_var, *integer_trail_,
                                  integer_reason);
    for (const Literal l : arc.presence_literals) {
      literal_reason->push_back(l.Negated());
    }

    // If the cycle happens to contain optional variable not yet ignored, then
    // it is not a conflict anymore, but we can infer that these variable must
    // all be ignored. This is because since we propagated them even if they
    // where not present for sure, their presence literal must form a cycle
    // together (i.e. they are all absent or present at the same time).
    if (integer_trail_->IsOptional(arc.head_var)) {
      must_be_all_true->push_back(
          integer_trail_->IsIgnoredLiteral(arc.head_var));
    }
  }

  // TODO(user): what if the sum overflow? this is just a check so I guess
  // we don't really care, but fix the issue.
  CHECK_GT(sum, 0);
}
620 
621 // Note that in our settings it is important to use an algorithm that tries to
622 // minimize the number of integer_trail_->Enqueue() as much as possible.
623 //
624 // TODO(user): The current algorithm is quite efficient, but there is probably
625 // still room for improvements.
BellmanFordTarjan(Trail * trail)626 bool PrecedencesPropagator::BellmanFordTarjan(Trail* trail) {
627   const int num_nodes = impacted_arcs_.size();
628 
629   // These vector are reset by CleanUpMarkedArcsAndParents() so resize is ok.
630   bf_can_be_skipped_.resize(num_nodes, false);
631   bf_parent_arc_of_.resize(num_nodes, ArcIndex(-1));
632   const auto cleanup =
633       ::absl::MakeCleanup([this]() { CleanUpMarkedArcsAndParents(); });
634 
635   // The queue initialization is done by InitializeBFQueueWithModifiedNodes().
636   while (!bf_queue_.empty()) {
637     const int node = bf_queue_.front();
638     bf_queue_.pop_front();
639     bf_in_queue_[node] = false;
640 
641     // TODO(user): we don't need bf_can_be_skipped_ since we can detect this
642     // if this node has a parent arc which is not marked. Investigate if it is
643     // faster without the vector<bool>.
644     //
645     // TODO(user): An alternative algorithm is to remove all these nodes from
646     // the queue instead of simply marking them. This should also lead to a
647     // better "relaxation" order of the arcs. It is however a bit more work to
648     // remove them since we need to track their position.
649     if (bf_can_be_skipped_[node]) {
650       DCHECK_NE(bf_parent_arc_of_[node], -1);
651       DCHECK(!arcs_[bf_parent_arc_of_[node]].is_marked);
652       continue;
653     }
654 
655     const IntegerValue tail_lb =
656         integer_trail_->LowerBound(IntegerVariable(node));
657     for (const ArcIndex arc_index : impacted_arcs_[IntegerVariable(node)]) {
658       const ArcInfo& arc = arcs_[arc_index];
659       DCHECK_EQ(arc.tail_var, node);
660       const IntegerValue candidate = tail_lb + ArcOffset(arc);
661       if (candidate > integer_trail_->LowerBound(arc.head_var)) {
662         if (integer_trail_->IsCurrentlyIgnored(arc.head_var)) continue;
663         if (!EnqueueAndCheck(arc, candidate, trail)) return false;
664 
665         // This is the Tarjan contribution to Bellman-Ford. This code detect
666         // positive cycle, and because it disassemble the subtree while doing
667         // so, the cost is amortized during the algorithm execution. Another
668         // advantages is that it will mark the node explored here as skippable
669         // which will avoid to propagate them too early (knowing that they will
670         // need to be propagated again later).
671         if (DisassembleSubtree(arc.head_var.value(), arc.tail_var.value(),
672                                &bf_can_be_skipped_)) {
673           std::vector<Literal> must_be_all_true;
674           AnalyzePositiveCycle(arc_index, trail, &must_be_all_true,
675                                &literal_reason_, &integer_reason_);
676           if (must_be_all_true.empty()) {
677             return integer_trail_->ReportConflict(literal_reason_,
678                                                   integer_reason_);
679           } else {
680             gtl::STLSortAndRemoveDuplicates(&must_be_all_true);
681             for (const Literal l : must_be_all_true) {
682               if (trail_->Assignment().LiteralIsFalse(l)) {
683                 literal_reason_.push_back(l);
684                 return integer_trail_->ReportConflict(literal_reason_,
685                                                       integer_reason_);
686               }
687             }
688             for (const Literal l : must_be_all_true) {
689               if (trail_->Assignment().LiteralIsTrue(l)) continue;
690               integer_trail_->EnqueueLiteral(l, literal_reason_,
691                                              integer_reason_);
692             }
693 
694             // We just marked some optional variable as ignored, no need
695             // to update bf_parent_arc_of_[].
696             continue;
697           }
698         }
699 
700         // We need to enforce the invariant that only the arc_index in
701         // bf_parent_arc_of_[] are marked (but not necessarily all of them
702         // since we unmark some in DisassembleSubtree()).
703         if (bf_parent_arc_of_[arc.head_var.value()] != -1) {
704           arcs_[bf_parent_arc_of_[arc.head_var.value()]].is_marked = false;
705         }
706 
707         // Tricky: We just enqueued the fact that the lower-bound of head is
708         // candidate. However, because the domain of head may be discrete, it is
709         // possible that the lower-bound of head is now higher than candidate!
710         // If this is the case, we don't update bf_parent_arc_of_[] so that we
711         // don't wrongly detect a positive weight cycle because of this "extra
712         // push".
713         const IntegerValue new_bound = integer_trail_->LowerBound(arc.head_var);
714         if (new_bound == candidate) {
715           bf_parent_arc_of_[arc.head_var.value()] = arc_index;
716           arcs_[arc_index].is_marked = true;
717         } else {
718           // We still unmark any previous dependency, since we have pushed the
719           // value of arc.head_var further.
720           bf_parent_arc_of_[arc.head_var.value()] = -1;
721         }
722 
723         // We do not re-enqueue if we are in a propagation loop and new_bound
724         // was not pushed to candidate or higher.
725         bf_can_be_skipped_[arc.head_var.value()] = false;
726         if (!bf_in_queue_[arc.head_var.value()] && new_bound >= candidate) {
727           bf_queue_.push_back(arc.head_var.value());
728           bf_in_queue_[arc.head_var.value()] = true;
729         }
730       }
731     }
732   }
733   return true;
734 }
735 
// Looks, inside the given clause, for groups of conditional precedence arcs
// sharing the same head variable, and adds a GreaterThanAtLeastOneOf()
// constraint for each group that (heuristically) covers all or almost all of
// the clause. Returns the number of constraints added. Must be called at
// decision level zero.
int PrecedencesPropagator::AddGreaterThanAtLeastOneOfConstraintsFromClause(
    const absl::Span<const Literal> clause, Model* model) {
  CHECK_EQ(model->GetOrCreate<Trail>()->CurrentDecisionLevel(), 0);
  if (clause.size() < 2) return 0;

  // Collect all arcs impacted by this clause.
  std::vector<ArcInfo> infos;
  for (const Literal l : clause) {
    if (l.Index() >= literal_to_new_impacted_arcs_.size()) continue;
    for (const ArcIndex arc_index : literal_to_new_impacted_arcs_[l.Index()]) {
      const ArcInfo& arc = arcs_[arc_index];
      // Skip arcs whose presence depends on more than one literal: a single
      // clause literal is then not enough to decide the arc presence.
      if (arc.presence_literals.size() != 1) continue;

      // TODO(user): Support variable offset.
      if (arc.offset_var != kNoIntegerVariable) continue;
      infos.push_back(arc);
    }
  }
  if (infos.size() <= 1) return 0;

  // Stable sort by head_var so that for a same head_var, the entries are
  // sorted by Literal as they appear in clause.
  std::stable_sort(infos.begin(), infos.end(),
                   [](const ArcInfo& a, const ArcInfo& b) {
                     return a.head_var < b.head_var;
                   });

  // We process ArcInfo with the same head_var together.
  int num_added_constraints = 0;
  auto* solver = model->GetOrCreate<SatSolver>();
  for (int i = 0; i < infos.size();) {
    const int start = i;
    const IntegerVariable head_var = infos[start].head_var;
    // Advance i past the run of entries with this same head_var.
    for (i++; i < infos.size() && infos[i].head_var == head_var; ++i) {
    }
    const absl::Span<ArcInfo> arcs(&infos[start], i - start);

    // Skip single arcs since it will already be fully propagated.
    if (arcs.size() < 2) continue;

    // Heuristic. Look for full or almost full clauses. We could add
    // GreaterThanAtLeastOneOf() with more enforcement literals. TODO(user):
    // experiments.
    if (arcs.size() + 1 < clause.size()) continue;

    std::vector<IntegerVariable> vars;
    std::vector<IntegerValue> offsets;
    std::vector<Literal> selectors;
    std::vector<Literal> enforcements;

    // Walk the clause and the sorted arcs in lock-step (the stable sort above
    // guarantees arcs of this group appear in clause order). A clause literal
    // with no matching arc is added, negated, as an enforcement literal.
    int j = 0;
    for (const Literal l : clause) {
      bool added = false;
      for (; j < arcs.size() && l == arcs[j].presence_literals.front(); ++j) {
        added = true;
        vars.push_back(arcs[j].tail_var);
        offsets.push_back(arcs[j].offset);

        // Note that duplicate selectors are supported.
        //
        // TODO(user): If we support variable offset, we should regroup the
        // arcs into one (tail + offset <= head) though, instead of having two
        // identical entries.
        selectors.push_back(l);
      }
      if (!added) {
        enforcements.push_back(l.Negated());
      }
    }

    // No point adding a constraint if there are not at least two different
    // literals in selectors.
    if (enforcements.size() + 1 == clause.size()) continue;

    ++num_added_constraints;
    model->Add(GreaterThanAtLeastOneOf(head_var, vars, offsets, selectors,
                                       enforcements));
    // Propagate right away; stop everything if the model became UNSAT.
    if (!solver->FinishPropagation()) return num_added_constraints;
  }
  return num_added_constraints;
}
817 
818 int PrecedencesPropagator::
AddGreaterThanAtLeastOneOfConstraintsWithClauseAutoDetection(Model * model)819     AddGreaterThanAtLeastOneOfConstraintsWithClauseAutoDetection(Model* model) {
820   auto* time_limit = model->GetOrCreate<TimeLimit>();
821   auto* solver = model->GetOrCreate<SatSolver>();
822 
823   // Fill the set of incoming conditional arcs for each variables.
824   absl::StrongVector<IntegerVariable, std::vector<ArcIndex>> incoming_arcs_;
825   for (ArcIndex arc_index(0); arc_index < arcs_.size(); ++arc_index) {
826     const ArcInfo& arc = arcs_[arc_index];
827 
828     // Only keep arc that have a fixed offset and a single presence_literals.
829     if (arc.offset_var != kNoIntegerVariable) continue;
830     if (arc.tail_var == arc.head_var) continue;
831     if (arc.presence_literals.size() != 1) continue;
832 
833     if (arc.head_var >= incoming_arcs_.size()) {
834       incoming_arcs_.resize(arc.head_var.value() + 1);
835     }
836     incoming_arcs_[arc.head_var].push_back(arc_index);
837   }
838 
839   int num_added_constraints = 0;
840   for (IntegerVariable target(0); target < incoming_arcs_.size(); ++target) {
841     if (incoming_arcs_[target].size() <= 1) continue;
842     if (time_limit->LimitReached()) return num_added_constraints;
843 
844     // Detect set of incoming arcs for which at least one must be present.
845     // TODO(user): Find more than one disjoint set of incoming arcs.
846     // TODO(user): call MinimizeCoreWithPropagation() on the clause.
847     solver->Backtrack(0);
848     if (solver->IsModelUnsat()) return num_added_constraints;
849     std::vector<Literal> clause;
850     for (const ArcIndex arc_index : incoming_arcs_[target]) {
851       const Literal literal = arcs_[arc_index].presence_literals.front();
852       if (solver->Assignment().LiteralIsFalse(literal)) continue;
853 
854       const int old_level = solver->CurrentDecisionLevel();
855       solver->EnqueueDecisionAndBacktrackOnConflict(literal.Negated());
856       if (solver->IsModelUnsat()) return num_added_constraints;
857       const int new_level = solver->CurrentDecisionLevel();
858       if (new_level <= old_level) {
859         clause = solver->GetLastIncompatibleDecisions();
860         break;
861       }
862     }
863     solver->Backtrack(0);
864 
865     if (clause.size() > 1) {
866       // Extract the set of arc for which at least one must be present.
867       const std::set<Literal> clause_set(clause.begin(), clause.end());
868       std::vector<ArcIndex> arcs_in_clause;
869       for (const ArcIndex arc_index : incoming_arcs_[target]) {
870         const Literal literal(arcs_[arc_index].presence_literals.front());
871         if (gtl::ContainsKey(clause_set, literal.Negated())) {
872           arcs_in_clause.push_back(arc_index);
873         }
874       }
875 
876       VLOG(2) << arcs_in_clause.size() << "/" << incoming_arcs_[target].size();
877 
878       ++num_added_constraints;
879       std::vector<IntegerVariable> vars;
880       std::vector<IntegerValue> offsets;
881       std::vector<Literal> selectors;
882       for (const ArcIndex a : arcs_in_clause) {
883         vars.push_back(arcs_[a].tail_var);
884         offsets.push_back(arcs_[a].offset);
885         selectors.push_back(Literal(arcs_[a].presence_literals.front()));
886       }
887       model->Add(GreaterThanAtLeastOneOf(target, vars, offsets, selectors));
888       if (!solver->FinishPropagation()) return num_added_constraints;
889     }
890   }
891 
892   return num_added_constraints;
893 }
894 
AddGreaterThanAtLeastOneOfConstraints(Model * model)895 int PrecedencesPropagator::AddGreaterThanAtLeastOneOfConstraints(Model* model) {
896   VLOG(1) << "Detecting GreaterThanAtLeastOneOf() constraints...";
897   auto* time_limit = model->GetOrCreate<TimeLimit>();
898   auto* solver = model->GetOrCreate<SatSolver>();
899   auto* clauses = model->GetOrCreate<LiteralWatchers>();
900   int num_added_constraints = 0;
901 
902   // We have two possible approaches. For now, we prefer the first one except if
903   // there is too many clauses in the problem.
904   //
905   // TODO(user): Do more extensive experiment. Remove the second approach as
906   // it is more time consuming? or identify when it make sense. Note that the
907   // first approach also allows to use "incomplete" at least one between arcs.
908   if (clauses->AllClausesInCreationOrder().size() < 1e6) {
909     // TODO(user): This does not take into account clause of size 2 since they
910     // are stored in the BinaryImplicationGraph instead. Some ideas specific
911     // to size 2:
912     // - There can be a lot of such clauses, but it might be nice to consider
913     //   them. we need to experiments.
914     // - The automatic clause detection might be a better approach and it
915     //   could be combined with probing.
916     for (const SatClause* clause : clauses->AllClausesInCreationOrder()) {
917       if (time_limit->LimitReached()) return num_added_constraints;
918       if (solver->IsModelUnsat()) return num_added_constraints;
919       num_added_constraints += AddGreaterThanAtLeastOneOfConstraintsFromClause(
920           clause->AsSpan(), model);
921     }
922 
923     // It is common that there is only two alternatives to push a variable.
924     // In this case, our presolve most likely made sure that the two are
925     // controlled by a single Boolean. This allows to detect this and add the
926     // appropriate greater than at least one of.
927     const int num_booleans = solver->NumVariables();
928     if (num_booleans < 1e6) {
929       for (int i = 0; i < num_booleans; ++i) {
930         if (time_limit->LimitReached()) return num_added_constraints;
931         if (solver->IsModelUnsat()) return num_added_constraints;
932         num_added_constraints +=
933             AddGreaterThanAtLeastOneOfConstraintsFromClause(
934                 {Literal(BooleanVariable(i), true),
935                  Literal(BooleanVariable(i), false)},
936                 model);
937       }
938     }
939 
940   } else {
941     num_added_constraints +=
942         AddGreaterThanAtLeastOneOfConstraintsWithClauseAutoDetection(model);
943   }
944 
945   VLOG(1) << "Added " << num_added_constraints
946           << " GreaterThanAtLeastOneOf() constraints.";
947   return num_added_constraints;
948 }
949 
950 }  // namespace sat
951 }  // namespace operations_research
952