// Copyright 2010-2021 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ortools/sat/cumulative.h"

#include <algorithm>
#include <memory>

#include "ortools/base/int_type.h"
#include "ortools/base/logging.h"
#include "ortools/sat/cumulative_energy.h"
#include "ortools/sat/disjunctive.h"
#include "ortools/sat/linear_constraint.h"
#include "ortools/sat/pb_constraint.h"
#include "ortools/sat/precedences.h"
#include "ortools/sat/sat_base.h"
#include "ortools/sat/sat_parameters.pb.h"
#include "ortools/sat/sat_solver.h"
#include "ortools/sat/timetable.h"
#include "ortools/sat/timetable_edgefinding.h"

namespace operations_research {
namespace sat {

std::function<void(Model*)> Cumulative(
    const std::vector<IntervalVariable>& vars,
    const std::vector<AffineExpression>& demands, AffineExpression capacity,
    SchedulingConstraintHelper* helper) {
  return [=](Model* model) mutable {
    if (vars.empty()) return;

    auto* intervals = model->GetOrCreate<IntervalsRepository>();
    auto* encoder = model->GetOrCreate<IntegerEncoder>();
    auto* integer_trail = model->GetOrCreate<IntegerTrail>();
    auto* watcher = model->GetOrCreate<GenericLiteralWatcher>();

    // Redundant constraints to ensure that the resource capacity is high
    // enough for each task. They also ensure that no task consumes more of the
    // resource than is available. This is useful because the subsequent
    // propagators do not filter the capacity variable very well.
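    //
    // Illustration (the numbers below are invented for this comment, not taken
    // from the codebase): with demand in [2, 5] and capacity in [3, 4],
    // loading
    //   demand - capacity <= 0
    // lets simple bound propagation tighten demand to [2, 4], a deduction the
    // scheduling propagators below would be slow to make on the capacity side.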
    for (int i = 0; i < demands.size(); ++i) {
      if (intervals->MaxSize(vars[i]) == 0) continue;

      LinearConstraintBuilder builder(model, kMinIntegerValue, IntegerValue(0));
      builder.AddTerm(demands[i], IntegerValue(1));
      builder.AddTerm(capacity, IntegerValue(-1));
      LinearConstraint ct = builder.Build();

      std::vector<Literal> enforcement_literals;
      if (intervals->IsOptional(vars[i])) {
        enforcement_literals.push_back(intervals->PresenceLiteral(vars[i]));
      }

      // If the interval can be of size zero, it currently does not count
      // towards the capacity. TODO(user): Change that since we have optional
      // intervals for this.
      if (intervals->MinSize(vars[i]) == 0) {
        enforcement_literals.push_back(encoder->GetOrCreateAssociatedLiteral(
            intervals->Size(vars[i]).GreaterOrEqual(IntegerValue(1))));
      }

      if (enforcement_literals.empty()) {
        LoadLinearConstraint(ct, model);
      } else {
        LoadConditionalLinearConstraint(enforcement_literals, ct, model);
      }
    }

    if (vars.size() == 1) return;

    const SatParameters& parameters = *(model->GetOrCreate<SatParameters>());

    // Detect a subset of intervals that needs to be in disjunction and add a
    // Disjunctive() constraint over them.
    if (parameters.use_disjunctive_constraint_in_cumulative_constraint()) {
      // TODO(user): We need to exclude intervals that can be of size zero
      // because the disjunctive does not "ignore" them like the cumulative
      // does. That is, the interval [2, 2) will be assumed to be in
      // disjunction with [1, 3) for instance. We need to uniformize the
      // handling of intervals of size zero.
      //
      // TODO(user): improve the condition (see CL147454185).
      std::vector<IntervalVariable> in_disjunction;
      for (int i = 0; i < vars.size(); ++i) {
        if (intervals->MinSize(vars[i]) > 0 &&
            2 * integer_trail->LowerBound(demands[i]) >
                integer_trail->UpperBound(capacity)) {
          in_disjunction.push_back(vars[i]);
        }
      }
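      // Why the condition above works, with invented numbers: if the capacity
      // is at most 5, any two tasks whose demands are both at least 3 cannot
      // overlap (3 + 3 > 5), so all such tasks pairwise exclude each other and
      // can be placed in a single disjunction.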

      // Add a disjunctive constraint on the intervals in in_disjunction. Do
      // not create the cumulative at all when all intervals must be in
      // disjunction.
      //
      // TODO(user): Do proper experiments to see how beneficial this is; the
      // disjunctive will propagate more but also uses slower algorithms. That
      // said, this is more a question of optimizing the disjunctive
      // propagation code.
      //
      // TODO(user): Another "known" idea is to detect pairs of tasks that must
      // be in disjunction and to create a Boolean to indicate which one is
      // before the other. It shouldn't change the propagation, but may result
      // in a faster one with smaller explanations, and the solver can also
      // take decisions on such Booleans.
      //
      // TODO(user): A better place for stuff like this could be in the
      // presolver so that it is easier to disable and play with alternatives.
      if (in_disjunction.size() > 1) model->Add(Disjunctive(in_disjunction));
      if (in_disjunction.size() == vars.size()) return;
    }

    if (helper == nullptr) {
      helper = new SchedulingConstraintHelper(vars, model);
      model->TakeOwnership(helper);
    }

    // For each variable that is after a subset of task ends (i.e. like a
    // makespan objective), we detect it and add a special constraint to
    // propagate it.
    //
    // TODO(user): Models that include the makespan as a special interval might
    // be better, but not everyone does that. In particular, this code allows
    // us to get decent lower bounds on the large cumulative MiniZinc
    // instances.
    //
    // TODO(user): this requires the precedence constraints to be already
    // loaded, and there is no guarantee of that currently. Find a more robust
    // way.
    //
    // TODO(user): There is a bit of code duplication with the disjunctive
    // precedence propagator. Abstract more?
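    //
    // Rough illustration with made-up numbers: suppose var >= end_a and
    // var >= end_b, both tasks have size 2 and demand 2, and the capacity is
    // 2. The pair needs (2*2 + 2*2) / 2 = 4 time units in total, so an
    // energy-based argument can raise var's lower bound to start_min + 4,
    // while each precedence alone only gives start_min + 2.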
    {
      std::vector<IntegerVariable> index_to_end_vars;
      std::vector<int> index_to_task;
      std::vector<PrecedencesPropagator::IntegerPrecedences> before;
      for (int t = 0; t < helper->NumTasks(); ++t) {
        const AffineExpression& end_exp = helper->Ends()[t];

        // TODO(user): Handle generic affine relation?
        if (end_exp.var == kNoIntegerVariable || end_exp.coeff != 1) continue;
        index_to_end_vars.push_back(end_exp.var);
        index_to_task.push_back(t);
      }
      model->GetOrCreate<PrecedencesPropagator>()->ComputePrecedences(
          index_to_end_vars, &before);
      const int size = before.size();
      for (int i = 0; i < size;) {
        const IntegerVariable var = before[i].var;
        DCHECK_NE(var, kNoIntegerVariable);

        IntegerValue min_offset = kMaxIntegerValue;
        std::vector<int> subtasks;
        for (; i < size && before[i].var == var; ++i) {
          const int t = index_to_task[before[i].index];
          subtasks.push_back(t);

          // We have var >= end_exp.var + offset, so
          // var >= (end_exp.var + end_exp.cte) + (offset - end_exp.cte)
          // var >= task end + new_offset.
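          //
          // Numeric instance (values invented for illustration): if offset = 5
          // and the task end is end_exp.var + 3, then var >= end_exp.var + 5
          // rewrites as var >= (end_exp.var + 3) + 2, i.e. task end + 2, so
          // the candidate new_offset is 2.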
          const AffineExpression& end_exp = helper->Ends()[t];
          min_offset =
              std::min(min_offset, before[i].offset - end_exp.constant);
        }

        if (subtasks.size() > 1) {
          CumulativeIsAfterSubsetConstraint* constraint =
              new CumulativeIsAfterSubsetConstraint(var, min_offset, capacity,
                                                    demands, subtasks,
                                                    integer_trail, helper);
          constraint->RegisterWith(watcher);
          model->TakeOwnership(constraint);
        }
      }
    }

    // Propagator responsible for applying the Timetabling filtering rule. It
    // increases the minimum of the start variables, decreases the maximum of
    // the end variables, and increases the minimum of the capacity variable.
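    //
    // Sketch of the rule with invented numbers: a task with start in [2, 5]
    // and fixed size 4 must run over [5, 6) in every schedule (its compulsory
    // part), so its demand is reserved on that window and any task whose
    // demand does not fit on top of it is pushed out of the window.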
    TimeTablingPerTask* time_tabling =
        new TimeTablingPerTask(demands, capacity, integer_trail, helper);
    time_tabling->RegisterWith(watcher);
    model->TakeOwnership(time_tabling);

    // Propagator responsible for applying the Overload Checking filtering
    // rule. It increases the minimum of the capacity variable.
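    //
    // Illustration (made-up numbers): if a set of tasks with total energy
    // sum(size * demand) = 12 must all run inside a window of length 4, the
    // capacity has to be at least ceil(12 / 4) = 3, so its lower bound can be
    // raised to 3.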
    if (parameters.use_overload_checker_in_cumulative_constraint()) {
      AddCumulativeOverloadChecker(demands, capacity, helper, model);
    }

    // Propagator responsible for applying the Timetable Edge finding filtering
    // rule. It increases the minimum of the start variables and decreases the
    // maximum of the end variables.
    if (parameters.use_timetable_edge_finding_in_cumulative_constraint()) {
      TimeTableEdgeFinding* time_table_edge_finding =
          new TimeTableEdgeFinding(demands, capacity, helper, integer_trail);
      time_table_edge_finding->RegisterWith(watcher);
      model->TakeOwnership(time_table_edge_finding);
    }
  };
}

std::function<void(Model*)> CumulativeTimeDecomposition(
    const std::vector<IntervalVariable>& vars,
    const std::vector<AffineExpression>& demands, AffineExpression capacity,
    SchedulingConstraintHelper* helper) {
  return [=](Model* model) {
    if (vars.empty()) return;

    IntegerTrail* integer_trail = model->GetOrCreate<IntegerTrail>();
    CHECK(integer_trail->IsFixed(capacity));
    const Coefficient fixed_capacity(
        integer_trail->UpperBound(capacity).value());

    const int num_tasks = vars.size();
    SatSolver* sat_solver = model->GetOrCreate<SatSolver>();
    IntegerEncoder* encoder = model->GetOrCreate<IntegerEncoder>();
    IntervalsRepository* intervals = model->GetOrCreate<IntervalsRepository>();

    std::vector<IntegerVariable> start_vars;
    std::vector<IntegerVariable> end_vars;
    std::vector<IntegerValue> fixed_demands;

    for (int t = 0; t < num_tasks; ++t) {
      start_vars.push_back(intervals->StartVar(vars[t]));
      end_vars.push_back(intervals->EndVar(vars[t]));
      CHECK(integer_trail->IsFixed(demands[t]));
      fixed_demands.push_back(integer_trail->LowerBound(demands[t]));
    }

    // Compute time range.
    IntegerValue min_start = kMaxIntegerValue;
    IntegerValue max_end = kMinIntegerValue;
    for (int t = 0; t < num_tasks; ++t) {
      min_start = std::min(min_start, integer_trail->LowerBound(start_vars[t]));
      max_end = std::max(max_end, integer_trail->UpperBound(end_vars[t]));
    }
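
    // Shape of the encoding, with invented numbers: for each time point we
    // post a pseudo-Boolean constraint sum(demand_t * consume_t) <= capacity
    // over the tasks that can overlap it. With capacity 3 and two tasks of
    // demand 2 each, the constraint 2*a + 2*b <= 3 forbids both tasks from
    // covering the same time point.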
    for (IntegerValue time = min_start; time < max_end; ++time) {
      std::vector<LiteralWithCoeff> literals_with_coeff;
      for (int t = 0; t < num_tasks; ++t) {
        sat_solver->Propagate();
        const IntegerValue start_min = integer_trail->LowerBound(start_vars[t]);
        const IntegerValue end_max = integer_trail->UpperBound(end_vars[t]);
        if (end_max <= time || time < start_min || fixed_demands[t] == 0) {
          continue;
        }

        // Task t consumes the resource at time if consume_condition is true.
        std::vector<Literal> consume_condition;
        const Literal consume = Literal(model->Add(NewBooleanVariable()), true);

        // Task t consumes the resource at time if it is present.
        if (intervals->IsOptional(vars[t])) {
          consume_condition.push_back(intervals->PresenceLiteral(vars[t]));
        }

        // Task t overlaps time.
        consume_condition.push_back(encoder->GetOrCreateAssociatedLiteral(
            IntegerLiteral::LowerOrEqual(start_vars[t], IntegerValue(time))));
        consume_condition.push_back(encoder->GetOrCreateAssociatedLiteral(
            IntegerLiteral::GreaterOrEqual(end_vars[t],
                                           IntegerValue(time + 1))));

        model->Add(ReifiedBoolAnd(consume_condition, consume));

        // TODO(user): this is needed because we currently can't create a
        // Boolean variable if the model is unsat.
        if (sat_solver->IsModelUnsat()) return;

        literals_with_coeff.push_back(
            LiteralWithCoeff(consume, Coefficient(fixed_demands[t].value())));
      }
      // The profile cannot exceed the capacity at time.
      sat_solver->AddLinearConstraint(false, Coefficient(0), true,
                                      fixed_capacity, &literals_with_coeff);

      // Abort if UNSAT.
      if (sat_solver->IsModelUnsat()) return;
    }
  };
}

std::function<void(Model*)> CumulativeUsingReservoir(
    const std::vector<IntervalVariable>& vars,
    const std::vector<AffineExpression>& demands, AffineExpression capacity,
    SchedulingConstraintHelper* helper) {
  return [=](Model* model) {
    if (vars.empty()) return;

    auto* integer_trail = model->GetOrCreate<IntegerTrail>();
    auto* encoder = model->GetOrCreate<IntegerEncoder>();
    auto* intervals = model->GetOrCreate<IntervalsRepository>();

    CHECK(integer_trail->IsFixed(capacity));
    const IntegerValue fixed_capacity(
        integer_trail->UpperBound(capacity).value());
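
    // Reading of the encoding (illustrative): each task contributes +demand
    // at its start and -demand at its end, and the reservoir constraint keeps
    // the running level within [0, capacity]. E.g. with capacity 3, a task of
    // demand 2 active at time 1 and another task of demand 2 also active at
    // time 1 would push the level to 4, which the constraint forbids.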

    std::vector<AffineExpression> times;
    std::vector<IntegerValue> deltas;
    std::vector<Literal> presences;

    const int num_tasks = vars.size();
    for (int t = 0; t < num_tasks; ++t) {
      CHECK(integer_trail->IsFixed(demands[t]));
      times.push_back(intervals->StartVar(vars[t]));
      deltas.push_back(integer_trail->LowerBound(demands[t]));
      times.push_back(intervals->EndVar(vars[t]));
      deltas.push_back(-integer_trail->LowerBound(demands[t]));
      if (intervals->IsOptional(vars[t])) {
        presences.push_back(intervals->PresenceLiteral(vars[t]));
        presences.push_back(intervals->PresenceLiteral(vars[t]));
      } else {
        presences.push_back(encoder->GetTrueLiteral());
        presences.push_back(encoder->GetTrueLiteral());
      }
    }
    AddReservoirConstraint(times, deltas, presences, 0, fixed_capacity.value(),
                           model);
  };
}

}  // namespace sat
}  // namespace operations_research