1 // Copyright 2010-2021 Google LLC
2 // Licensed under the Apache License, Version 2.0 (the "License");
3 // you may not use this file except in compliance with the License.
4 // You may obtain a copy of the License at
5 //
6 // http://www.apache.org/licenses/LICENSE-2.0
7 //
8 // Unless required by applicable law or agreed to in writing, software
9 // distributed under the License is distributed on an "AS IS" BASIS,
10 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 // See the License for the specific language governing permissions and
12 // limitations under the License.
13
14 // This file contains implementations of several resource constraints.
15 // The implemented constraints are:
16 // * Disjunctive: forces a set of intervals to be non-overlapping
17 // * Cumulative: forces a set of intervals with associated demands to be such
18 // that the sum of demands of the intervals containing any given integer
19 // does not exceed a capacity.
20 // In addition, it implements the SequenceVar that allows ranking decisions
21 // on a set of interval variables.
22
23 #include <algorithm>
24 #include <cstdint>
25 #include <limits>
26 #include <queue>
27 #include <string>
28 #include <utility>
29 #include <vector>
30
31 #include "absl/container/flat_hash_map.h"
32 #include "absl/strings/str_cat.h"
33 #include "absl/strings/str_format.h"
34 #include "absl/strings/str_join.h"
35 #include "ortools/base/commandlineflags.h"
36 #include "ortools/base/integral_types.h"
37 #include "ortools/base/logging.h"
38 #include "ortools/base/macros.h"
39 #include "ortools/base/mathutil.h"
40 #include "ortools/base/stl_util.h"
41 #include "ortools/constraint_solver/constraint_solver.h"
42 #include "ortools/constraint_solver/constraint_solveri.h"
43 #include "ortools/util/bitset.h"
44 #include "ortools/util/monoid_operation_tree.h"
45 #include "ortools/util/saturated_arithmetic.h"
46 #include "ortools/util/string_array.h"
47
48 namespace operations_research {
49 namespace {
50 // ----- Comparison functions -----
51
52 // TODO(user): Tie breaking.
53
54 // Comparison methods, used by the STL sort.
// Orders two tasks by increasing minimal start time of their intervals.
template <class Task>
bool StartMinLessThan(Task* const w1, Task* const w2) {
  const auto lhs = w1->interval->StartMin();
  const auto rhs = w2->interval->StartMin();
  return lhs < rhs;
}
59
60 // A comparator that sorts the tasks by their effective earliest start time when
61 // using the shortest duration possible. This comparator can be used when
62 // sorting the tasks before they are inserted to a Theta-tree.
// Orders two tasks by their effective earliest start time when using the
// shortest possible duration (EndMin - DurationMin). Useful for sorting
// tasks before inserting them into a Theta-tree.
template <class Task>
bool ShortestDurationStartMinLessThan(Task* const w1, Task* const w2) {
  const auto effective_est_1 = w1->interval->EndMin() - w1->interval->DurationMin();
  const auto effective_est_2 = w2->interval->EndMin() - w2->interval->DurationMin();
  return effective_est_1 < effective_est_2;
}
68
// Orders two tasks by increasing maximal start time of their intervals.
template <class Task>
bool StartMaxLessThan(Task* const w1, Task* const w2) {
  const auto lhs = w1->interval->StartMax();
  const auto rhs = w2->interval->StartMax();
  return lhs < rhs;
}
73
// Orders two tasks by increasing minimal end time of their intervals.
template <class Task>
bool EndMinLessThan(Task* const w1, Task* const w2) {
  const auto lhs = w1->interval->EndMin();
  const auto rhs = w2->interval->EndMin();
  return lhs < rhs;
}
78
// Orders two tasks by increasing maximal end time of their intervals.
template <class Task>
bool EndMaxLessThan(Task* const w1, Task* const w2) {
  const auto lhs = w1->interval->EndMax();
  const auto rhs = w2->interval->EndMax();
  return lhs < rhs;
}
83
IntervalStartMinLessThan(IntervalVar * i1,IntervalVar * i2)84 bool IntervalStartMinLessThan(IntervalVar* i1, IntervalVar* i2) {
85 return i1->StartMin() < i2->StartMin();
86 }
87
88 // ----- Wrappers around intervals -----
89
90 // A DisjunctiveTask is a non-preemptive task sharing a disjunctive resource.
91 // That is, it corresponds to an interval, and this interval cannot overlap with
92 // any other interval of a DisjunctiveTask sharing the same resource.
93 // It is indexed, that is it is aware of its position in a reference array.
94 struct DisjunctiveTask {
DisjunctiveTaskoperations_research::__anon726a53860111::DisjunctiveTask95 explicit DisjunctiveTask(IntervalVar* const interval_)
96 : interval(interval_), index(-1) {}
97
DebugStringoperations_research::__anon726a53860111::DisjunctiveTask98 std::string DebugString() const { return interval->DebugString(); }
99
100 IntervalVar* interval;
101 int index;
102 };
103
// A CumulativeTask is a non-preemptive task sharing a cumulative resource.
// That is, it corresponds to an interval and a demand. The sum of demands of
// all CumulativeTasks sharing a resource of capacity c whose intervals
// contain any integer t cannot exceed c.
// It is indexed, that is it is aware of its position in a reference array.
109 struct CumulativeTask {
CumulativeTaskoperations_research::__anon726a53860111::CumulativeTask110 CumulativeTask(IntervalVar* const interval_, int64_t demand_)
111 : interval(interval_), demand(demand_), index(-1) {}
112
EnergyMinoperations_research::__anon726a53860111::CumulativeTask113 int64_t EnergyMin() const { return interval->DurationMin() * demand; }
114
DemandMinoperations_research::__anon726a53860111::CumulativeTask115 int64_t DemandMin() const { return demand; }
116
WhenAnythingoperations_research::__anon726a53860111::CumulativeTask117 void WhenAnything(Demon* const demon) { interval->WhenAnything(demon); }
118
DebugStringoperations_research::__anon726a53860111::CumulativeTask119 std::string DebugString() const {
120 return absl::StrFormat("Task{ %s, demand: %d }", interval->DebugString(),
121 demand);
122 }
123
124 IntervalVar* interval;
125 int64_t demand;
126 int index;
127 };
128
129 // A VariableCumulativeTask is a non-preemptive task sharing a
130 // cumulative resource. That is, it corresponds to an interval and a
131 // demand. The sum of demands of all cumulative tasks
132 // VariableCumulativeTasks sharing a resource of capacity c whose
133 // intervals contain any integer t cannot exceed c. It is indexed,
134 // that is it is aware of its position in a reference array.
135 struct VariableCumulativeTask {
VariableCumulativeTaskoperations_research::__anon726a53860111::VariableCumulativeTask136 VariableCumulativeTask(IntervalVar* const interval_, IntVar* demand_)
137 : interval(interval_), demand(demand_), index(-1) {}
138
EnergyMinoperations_research::__anon726a53860111::VariableCumulativeTask139 int64_t EnergyMin() const { return interval->DurationMin() * demand->Min(); }
140
DemandMinoperations_research::__anon726a53860111::VariableCumulativeTask141 int64_t DemandMin() const { return demand->Min(); }
142
WhenAnythingoperations_research::__anon726a53860111::VariableCumulativeTask143 void WhenAnything(Demon* const demon) {
144 interval->WhenAnything(demon);
145 demand->WhenRange(demon);
146 }
147
DebugStringoperations_research::__anon726a53860111::VariableCumulativeTask148 std::string DebugString() const {
149 return absl::StrFormat("Task{ %s, demand: %s }", interval->DebugString(),
150 demand->DebugString());
151 }
152
153 IntervalVar* const interval;
154 IntVar* const demand;
155 int index;
156 };
157
158 // ---------- Theta-Trees ----------
159
// This is based on Petr Vilim's (public) PhD work.
// All names come from his work. See http://vilim.eu/petr.
162
163 // Node of a Theta-tree
struct ThetaNode {
  // Identity element: contributes nothing to processing time, and an
  // earliest completion time of -infinity.
  ThetaNode()
      : total_processing(0), total_ect(std::numeric_limits<int64_t>::min()) {}

  // Single interval element.
  explicit ThetaNode(const IntervalVar* const interval)
      : total_processing(interval->DurationMin()),
        total_ect(interval->EndMin()) {
    // NOTE(user): Petr Vilim's thesis assumes that all tasks in the
    // scheduling problem have fixed duration and that propagation already
    // updated the bounds of the start/end times accordingly.
    // The problem in this case is that the recursive formula for computing
    // total_ect was only proved for the case where the duration is fixed; in
    // our case, we use StartMin() + DurationMin() for the earliest completion
    // time of a task, which should not break any assumptions, but may give
    // bounds that are too loose.
  }

  // Combines two child nodes: processing time is additive, and the earliest
  // completion time is either that of the right subset alone, or the left
  // subset's completion time delayed by the right subset's processing time.
  // Saturated arithmetic keeps the -infinity identity absorbing.
  void Compute(const ThetaNode& left, const ThetaNode& right) {
    total_processing = CapAdd(left.total_processing, right.total_processing);
    total_ect = std::max(CapAdd(left.total_ect, right.total_processing),
                         right.total_ect);
  }

  // True iff this node still holds the identity element (no task inserted).
  bool IsIdentity() const {
    return total_processing == 0LL &&
           total_ect == std::numeric_limits<int64_t>::min();
  }

  std::string DebugString() const {
    return absl::StrCat("ThetaNode{ p = ", total_processing,
                        ", e = ", total_ect < 0LL ? -1LL : total_ect, " }");
  }

  // Sum of the minimal durations of the tasks covered by this node.
  int64_t total_processing;
  // Earliest completion time of the set of tasks covered by this node.
  int64_t total_ect;
};
202
203 // A theta-tree is a container for a set of intervals supporting the following
204 // operations:
205 // * Insertions and deletion in O(log size_), with size_ the maximal number of
206 // tasks the tree may contain;
207 // * Querying the following quantity in O(1):
208 // Max_{subset S of the set of contained intervals} (
209 // Min_{i in S}(i.StartMin) + Sum_{i in S}(i.DurationMin) )
class ThetaTree : public MonoidOperationTree<ThetaNode> {
 public:
  explicit ThetaTree(int size) : MonoidOperationTree<ThetaNode>(size) {}

  // Earliest completion time over the whole inserted set (see class comment).
  int64_t Ect() const { return result().total_ect; }

  // Inserts the task at its precomputed position (task->index must be set).
  void Insert(const DisjunctiveTask* const task) {
    Set(task->index, ThetaNode(task->interval));
  }

  // Resets the task's slot back to the identity element.
  void Remove(const DisjunctiveTask* const task) { Reset(task->index); }

  // True iff the task's slot currently holds a non-identity node.
  bool IsInserted(const DisjunctiveTask* const task) const {
    return !GetOperand(task->index).IsIdentity();
  }
};
226
227 // ----------------- Lambda Theta Tree -----------------------
228
229 // Lambda-theta-node
230 // These nodes are cumulative lambda theta-node. This is reflected in the
231 // terminology. They can also be used in the disjunctive case, and this incurs
232 // no performance penalty.
233 struct LambdaThetaNode {
234 // Special value for task indices meaning 'no such task'.
235 static const int kNone;
236
237 // Identity constructor
LambdaThetaNodeoperations_research::__anon726a53860111::LambdaThetaNode238 LambdaThetaNode()
239 : energy(0LL),
240 energetic_end_min(std::numeric_limits<int64_t>::min()),
241 energy_opt(0LL),
242 argmax_energy_opt(kNone),
243 energetic_end_min_opt(std::numeric_limits<int64_t>::min()),
244 argmax_energetic_end_min_opt(kNone) {}
245
246 // Constructor for a single cumulative task in the Theta set
LambdaThetaNodeoperations_research::__anon726a53860111::LambdaThetaNode247 LambdaThetaNode(int64_t capacity, const CumulativeTask& task)
248 : energy(task.EnergyMin()),
249 energetic_end_min(CapAdd(capacity * task.interval->StartMin(), energy)),
250 energy_opt(energy),
251 argmax_energy_opt(kNone),
252 energetic_end_min_opt(energetic_end_min),
253 argmax_energetic_end_min_opt(kNone) {}
254
255 // Constructor for a single cumulative task in the Lambda set
LambdaThetaNodeoperations_research::__anon726a53860111::LambdaThetaNode256 LambdaThetaNode(int64_t capacity, const CumulativeTask& task, int index)
257 : energy(0LL),
258 energetic_end_min(std::numeric_limits<int64_t>::min()),
259 energy_opt(task.EnergyMin()),
260 argmax_energy_opt(index),
261 energetic_end_min_opt(capacity * task.interval->StartMin() +
262 energy_opt),
263 argmax_energetic_end_min_opt(index) {
264 DCHECK_GE(index, 0);
265 }
266
267 // Constructor for a single cumulative task in the Theta set
LambdaThetaNodeoperations_research::__anon726a53860111::LambdaThetaNode268 LambdaThetaNode(int64_t capacity, const VariableCumulativeTask& task)
269 : energy(task.EnergyMin()),
270 energetic_end_min(CapAdd(capacity * task.interval->StartMin(), energy)),
271 energy_opt(energy),
272 argmax_energy_opt(kNone),
273 energetic_end_min_opt(energetic_end_min),
274 argmax_energetic_end_min_opt(kNone) {}
275
276 // Constructor for a single cumulative task in the Lambda set
LambdaThetaNodeoperations_research::__anon726a53860111::LambdaThetaNode277 LambdaThetaNode(int64_t capacity, const VariableCumulativeTask& task,
278 int index)
279 : energy(0LL),
280 energetic_end_min(std::numeric_limits<int64_t>::min()),
281 energy_opt(task.EnergyMin()),
282 argmax_energy_opt(index),
283 energetic_end_min_opt(capacity * task.interval->StartMin() +
284 energy_opt),
285 argmax_energetic_end_min_opt(index) {
286 DCHECK_GE(index, 0);
287 }
288
289 // Constructor for a single interval in the Theta set
LambdaThetaNodeoperations_research::__anon726a53860111::LambdaThetaNode290 explicit LambdaThetaNode(const IntervalVar* const interval)
291 : energy(interval->DurationMin()),
292 energetic_end_min(interval->EndMin()),
293 energy_opt(interval->DurationMin()),
294 argmax_energy_opt(kNone),
295 energetic_end_min_opt(interval->EndMin()),
296 argmax_energetic_end_min_opt(kNone) {}
297
298 // Constructor for a single interval in the Lambda set
299 // 'index' is the index of the given interval in the est vector
LambdaThetaNodeoperations_research::__anon726a53860111::LambdaThetaNode300 LambdaThetaNode(const IntervalVar* const interval, int index)
301 : energy(0LL),
302 energetic_end_min(std::numeric_limits<int64_t>::min()),
303 energy_opt(interval->DurationMin()),
304 argmax_energy_opt(index),
305 energetic_end_min_opt(interval->EndMin()),
306 argmax_energetic_end_min_opt(index) {
307 DCHECK_GE(index, 0);
308 }
309
310 // Sets this LambdaThetaNode to the result of the natural binary operations
311 // over the two given operands, corresponding to the following set operations:
312 // Theta = left.Theta union right.Theta
313 // Lambda = left.Lambda union right.Lambda
314 //
315 // No set operation actually occur: we only maintain the relevant quantities
316 // associated with such sets.
Computeoperations_research::__anon726a53860111::LambdaThetaNode317 void Compute(const LambdaThetaNode& left, const LambdaThetaNode& right) {
318 energy = CapAdd(left.energy, right.energy);
319 energetic_end_min = std::max(right.energetic_end_min,
320 CapAdd(left.energetic_end_min, right.energy));
321 const int64_t energy_left_opt = CapAdd(left.energy_opt, right.energy);
322 const int64_t energy_right_opt = CapAdd(left.energy, right.energy_opt);
323 if (energy_left_opt > energy_right_opt) {
324 energy_opt = energy_left_opt;
325 argmax_energy_opt = left.argmax_energy_opt;
326 } else {
327 energy_opt = energy_right_opt;
328 argmax_energy_opt = right.argmax_energy_opt;
329 }
330 const int64_t ect1 = right.energetic_end_min_opt;
331 const int64_t ect2 = CapAdd(left.energetic_end_min, right.energy_opt);
332 const int64_t ect3 = CapAdd(left.energetic_end_min_opt, right.energy);
333 if (ect1 >= ect2 && ect1 >= ect3) { // ect1 max
334 energetic_end_min_opt = ect1;
335 argmax_energetic_end_min_opt = right.argmax_energetic_end_min_opt;
336 } else if (ect2 >= ect1 && ect2 >= ect3) { // ect2 max
337 energetic_end_min_opt = ect2;
338 argmax_energetic_end_min_opt = right.argmax_energy_opt;
339 } else { // ect3 max
340 energetic_end_min_opt = ect3;
341 argmax_energetic_end_min_opt = left.argmax_energetic_end_min_opt;
342 }
343 // The processing time, with one grey interval, should be no less than
344 // without any grey interval.
345 DCHECK(energy_opt >= energy);
346 // If there is no responsible grey interval for the processing time,
347 // the processing time with a grey interval should equal the one
348 // without.
349 DCHECK((argmax_energy_opt != kNone) || (energy_opt == energy));
350 }
351
352 // Amount of resource consumed by the Theta set, in units of demand X time.
353 // This is energy(Theta).
354 int64_t energy;
355
356 // Max_{subset S of Theta} (capacity * start_min(S) + energy(S))
357 int64_t energetic_end_min;
358
359 // Max_{i in Lambda} (energy(Theta union {i}))
360 int64_t energy_opt;
361
362 // The argmax in energy_opt_. It is the index of the chosen task in the Lambda
363 // set, if any, or kNone if none.
364 int argmax_energy_opt;
365
366 // Max_{subset S of Theta, i in Lambda}
367 // (capacity * start_min(S union {i}) + energy(S union {i}))
368 int64_t energetic_end_min_opt;
369
370 // The argmax in energetic_end_min_opt_. It is the index of the chosen task in
371 // the Lambda set, if any, or kNone if none.
372 int argmax_energetic_end_min_opt;
373 };
374
375 const int LambdaThetaNode::kNone = -1;
376
377 // Disjunctive Lambda-Theta tree
class DisjunctiveLambdaThetaTree : public MonoidOperationTree<LambdaThetaNode> {
 public:
  explicit DisjunctiveLambdaThetaTree(int size)
      : MonoidOperationTree<LambdaThetaNode>(size) {}

  // Inserts the task into the Theta (white) set at its precomputed position.
  void Insert(const DisjunctiveTask& task) {
    Set(task.index, LambdaThetaNode(task.interval));
  }

  // Moves the task to the Lambda (grey) set: it no longer counts in the
  // Theta quantities but remains a candidate for the *Opt quantities.
  void Grey(const DisjunctiveTask& task) {
    const int index = task.index;
    Set(index, LambdaThetaNode(task.interval, index));
  }

  // Earliest completion time of the Theta set.
  int64_t Ect() const { return result().energetic_end_min; }
  // Earliest completion time with at most one grey task added.
  int64_t EctOpt() const { return result().energetic_end_min_opt; }
  // Index of the grey task responsible for EctOpt(), or kNone.
  int ResponsibleOpt() const { return result().argmax_energetic_end_min_opt; }
};
396
397 // A cumulative lambda-theta tree
class CumulativeLambdaThetaTree : public MonoidOperationTree<LambdaThetaNode> {
 public:
  CumulativeLambdaThetaTree(int size, int64_t capacity_max)
      : MonoidOperationTree<LambdaThetaNode>(size),
        capacity_max_(capacity_max) {}

  // Clears the tree and installs a new maximal capacity.
  void Init(int64_t capacity_max) {
    Clear();
    capacity_max_ = capacity_max;
  }

  // Inserts a fixed-demand task into the Theta (white) set.
  void Insert(const CumulativeTask& task) {
    Set(task.index, LambdaThetaNode(capacity_max_, task));
  }

  // Moves a fixed-demand task to the Lambda (grey) set.
  void Grey(const CumulativeTask& task) {
    const int index = task.index;
    Set(index, LambdaThetaNode(capacity_max_, task, index));
  }

  // Inserts a variable-demand task into the Theta (white) set.
  void Insert(const VariableCumulativeTask& task) {
    Set(task.index, LambdaThetaNode(capacity_max_, task));
  }

  // Moves a variable-demand task to the Lambda (grey) set.
  void Grey(const VariableCumulativeTask& task) {
    const int index = task.index;
    Set(index, LambdaThetaNode(capacity_max_, task, index));
  }

  // Energetic quantities, in units of capacity x time.
  int64_t energetic_end_min() const { return result().energetic_end_min; }
  int64_t energetic_end_min_opt() const {
    return result().energetic_end_min_opt;
  }
  // Earliest completion time in time units: energetic bound divided by the
  // maximal capacity, rounded up.
  int64_t Ect() const {
    return MathUtil::CeilOfRatio(energetic_end_min(), capacity_max_);
  }
  // Same as Ect(), with at most one grey task added.
  int64_t EctOpt() const {
    return MathUtil::CeilOfRatio(result().energetic_end_min_opt, capacity_max_);
  }
  // Index of the grey task responsible for the *Opt quantities, or kNone.
  int argmax_energetic_end_min_opt() const {
    return result().argmax_energetic_end_min_opt;
  }

 private:
  int64_t capacity_max_;
};
444
445 // -------------- Not Last -----------------------------------------
446
447 // A class that implements the 'Not-Last' propagation algorithm for the unary
448 // resource constraint.
class NotLast {
 public:
  NotLast(Solver* const solver, const std::vector<IntervalVar*>& intervals,
          bool mirror, bool strict);

  // The three vectors share the same DisjunctiveTask objects, so deleting
  // through one of them is enough.
  ~NotLast() { gtl::STLDeleteElements(&by_start_min_); }

  // Runs one pass of the 'not-last' rule. Returns true iff a bound changed.
  bool Propagate();

 private:
  ThetaTree theta_tree_;
  // The same set of tasks, each vector sorted by the criterion its name says.
  std::vector<DisjunctiveTask*> by_start_min_;
  std::vector<DisjunctiveTask*> by_end_max_;
  std::vector<DisjunctiveTask*> by_start_max_;
  // Candidate new end max for by_start_min_[i]->interval (see Propagate).
  std::vector<int64_t> new_lct_;
  // When false, tasks whose minimal duration is 0 are not tightened.
  const bool strict_;
};
466
NotLast(Solver * const solver,const std::vector<IntervalVar * > & intervals,bool mirror,bool strict)467 NotLast::NotLast(Solver* const solver,
468 const std::vector<IntervalVar*>& intervals, bool mirror,
469 bool strict)
470 : theta_tree_(intervals.size()),
471 by_start_min_(intervals.size()),
472 by_end_max_(intervals.size()),
473 by_start_max_(intervals.size()),
474 new_lct_(intervals.size(), -1LL),
475 strict_(strict) {
476 // Populate the different vectors.
477 for (int i = 0; i < intervals.size(); ++i) {
478 IntervalVar* const underlying =
479 mirror ? solver->MakeMirrorInterval(intervals[i]) : intervals[i];
480 IntervalVar* const relaxed = solver->MakeIntervalRelaxedMin(underlying);
481 by_start_min_[i] = new DisjunctiveTask(relaxed);
482 by_end_max_[i] = by_start_min_[i];
483 by_start_max_[i] = by_start_min_[i];
484 }
485 }
486
Propagate()487 bool NotLast::Propagate() {
488 // ---- Init ----
489 std::sort(by_start_max_.begin(), by_start_max_.end(),
490 StartMaxLessThan<DisjunctiveTask>);
491 std::sort(by_end_max_.begin(), by_end_max_.end(),
492 EndMaxLessThan<DisjunctiveTask>);
493 // Update start min positions
494 std::sort(by_start_min_.begin(), by_start_min_.end(),
495 StartMinLessThan<DisjunctiveTask>);
496 for (int i = 0; i < by_start_min_.size(); ++i) {
497 by_start_min_[i]->index = i;
498 }
499 theta_tree_.Clear();
500 for (int i = 0; i < by_start_min_.size(); ++i) {
501 new_lct_[i] = by_start_min_[i]->interval->EndMax();
502 }
503
504 // --- Execute ----
505 int j = 0;
506 for (DisjunctiveTask* const twi : by_end_max_) {
507 while (j < by_start_max_.size() &&
508 twi->interval->EndMax() > by_start_max_[j]->interval->StartMax()) {
509 if (j > 0 && theta_tree_.Ect() > by_start_max_[j]->interval->StartMax()) {
510 const int64_t new_end_max = by_start_max_[j - 1]->interval->StartMax();
511 new_lct_[by_start_max_[j]->index] = new_end_max;
512 }
513 theta_tree_.Insert(by_start_max_[j]);
514 j++;
515 }
516 const bool inserted = theta_tree_.IsInserted(twi);
517 if (inserted) {
518 theta_tree_.Remove(twi);
519 }
520 const int64_t ect_theta_less_i = theta_tree_.Ect();
521 if (inserted) {
522 theta_tree_.Insert(twi);
523 }
524 if (ect_theta_less_i > twi->interval->EndMax() && j > 0) {
525 const int64_t new_end_max = by_start_max_[j - 1]->interval->EndMax();
526 if (new_end_max > new_lct_[twi->index]) {
527 new_lct_[twi->index] = new_end_max;
528 }
529 }
530 }
531
532 // Apply modifications
533 bool modified = false;
534 for (int i = 0; i < by_start_min_.size(); ++i) {
535 IntervalVar* const var = by_start_min_[i]->interval;
536 if ((strict_ || var->DurationMin() > 0) && var->EndMax() > new_lct_[i]) {
537 modified = true;
538 var->SetEndMax(new_lct_[i]);
539 }
540 }
541 return modified;
542 }
543
544 // ------ Edge finder + detectable precedences -------------
545
546 // A class that implements two propagation algorithms: edge finding and
547 // detectable precedences. These algorithms both push intervals to the right,
548 // which is why they are grouped together.
class EdgeFinderAndDetectablePrecedences {
 public:
  EdgeFinderAndDetectablePrecedences(Solver* const solver,
                                     const std::vector<IntervalVar*>& intervals,
                                     bool mirror, bool strict);
  ~EdgeFinderAndDetectablePrecedences() {
    gtl::STLDeleteElements(&by_start_min_);
  }
  // Number of tasks.
  int64_t size() const { return by_start_min_.size(); }
  // Interval of the task at 'index' in the by_start_min_ ordering.
  IntervalVar* interval(int index) { return by_start_min_[index]->interval; }
  // Re-sorts by_start_min_ and refreshes the tasks' cached indices.
  void UpdateEst();
  // Fails the solver when a set of tasks provably overloads the resource.
  void OverloadChecking();
  // Returns true iff at least one start min was tightened.
  bool DetectablePrecedences();
  // Returns true iff at least one start min was tightened.
  bool EdgeFinder();

 private:
  Solver* const solver_;

  // --- All the following member variables are essentially used as local ones:
  // no invariant is maintained about them, except for the fact that the vectors
  // always contains all the considered intervals, so any function that wants to
  // use them must first sort them in the right order.

  // All of these vectors store the same set of objects. Therefore, at
  // destruction time, STLDeleteElements should be called on only one of them.
  // It does not matter which one.

  ThetaTree theta_tree_;
  std::vector<DisjunctiveTask*> by_end_min_;
  std::vector<DisjunctiveTask*> by_start_min_;
  std::vector<DisjunctiveTask*> by_end_max_;
  std::vector<DisjunctiveTask*> by_start_max_;
  // new_est_[i] is the new start min for interval est_[i]->interval.
  std::vector<int64_t> new_est_;
  // new_lct_[i] is the new end max for interval est_[i]->interval.
  std::vector<int64_t> new_lct_;
  DisjunctiveLambdaThetaTree lt_tree_;
  // When false, tasks whose minimal duration is 0 are not tightened.
  const bool strict_;
};
588
EdgeFinderAndDetectablePrecedences(Solver * const solver,const std::vector<IntervalVar * > & intervals,bool mirror,bool strict)589 EdgeFinderAndDetectablePrecedences::EdgeFinderAndDetectablePrecedences(
590 Solver* const solver, const std::vector<IntervalVar*>& intervals,
591 bool mirror, bool strict)
592 : solver_(solver),
593 theta_tree_(intervals.size()),
594 lt_tree_(intervals.size()),
595 strict_(strict) {
596 // Populate of the array of intervals
597 for (IntervalVar* const interval : intervals) {
598 IntervalVar* const underlying =
599 mirror ? solver->MakeMirrorInterval(interval) : interval;
600 IntervalVar* const relaxed = solver->MakeIntervalRelaxedMax(underlying);
601 DisjunctiveTask* const task = new DisjunctiveTask(relaxed);
602 by_end_min_.push_back(task);
603 by_start_min_.push_back(task);
604 by_end_max_.push_back(task);
605 by_start_max_.push_back(task);
606 new_est_.push_back(std::numeric_limits<int64_t>::min());
607 }
608 }
609
UpdateEst()610 void EdgeFinderAndDetectablePrecedences::UpdateEst() {
611 std::sort(by_start_min_.begin(), by_start_min_.end(),
612 ShortestDurationStartMinLessThan<DisjunctiveTask>);
613 for (int i = 0; i < size(); ++i) {
614 by_start_min_[i]->index = i;
615 }
616 }
617
OverloadChecking()618 void EdgeFinderAndDetectablePrecedences::OverloadChecking() {
619 // Initialization.
620 UpdateEst();
621 std::sort(by_end_max_.begin(), by_end_max_.end(),
622 EndMaxLessThan<DisjunctiveTask>);
623 theta_tree_.Clear();
624
625 for (DisjunctiveTask* const task : by_end_max_) {
626 theta_tree_.Insert(task);
627 if (theta_tree_.Ect() > task->interval->EndMax()) {
628 solver_->Fail();
629 }
630 }
631 }
632
bool EdgeFinderAndDetectablePrecedences::DetectablePrecedences() {
  // Initialization.
  UpdateEst();
  new_est_.assign(size(), std::numeric_limits<int64_t>::min());

  // Propagate in one direction
  std::sort(by_end_min_.begin(), by_end_min_.end(),
            EndMinLessThan<DisjunctiveTask>);
  std::sort(by_start_max_.begin(), by_start_max_.end(),
            StartMaxLessThan<DisjunctiveTask>);
  theta_tree_.Clear();
  int j = 0;
  for (DisjunctiveTask* const task_i : by_end_min_) {
    // Insert every task whose start max is strictly before task_i's end min:
    // each such task is a "detectable" predecessor of task_i.
    if (j < size()) {
      DisjunctiveTask* task_j = by_start_max_[j];
      while (task_i->interval->EndMin() > task_j->interval->StartMax()) {
        theta_tree_.Insert(task_j);
        j++;
        if (j == size()) break;
        task_j = by_start_max_[j];
      }
    }
    const int64_t esti = task_i->interval->StartMin();
    // Query ECT of the predecessor set excluding task_i itself: remove it
    // for the query, then restore it so the tree stays consistent.
    bool inserted = theta_tree_.IsInserted(task_i);
    if (inserted) {
      theta_tree_.Remove(task_i);
    }
    const int64_t oesti = theta_tree_.Ect();
    if (inserted) {
      theta_tree_.Insert(task_i);
    }
    // Record the tightened start min, or min() to mean "no update".
    if (oesti > esti) {
      new_est_[task_i->index] = oesti;
    } else {
      new_est_[task_i->index] = std::numeric_limits<int64_t>::min();
    }
  }

  // Apply modifications
  bool modified = false;
  for (int i = 0; i < size(); ++i) {
    IntervalVar* const var = by_start_min_[i]->interval;
    // Zero-duration tasks are only tightened in strict mode.
    if (new_est_[i] != std::numeric_limits<int64_t>::min() &&
        (strict_ || var->DurationMin() > 0)) {
      modified = true;
      by_start_min_[i]->interval->SetStartMin(new_est_[i]);
    }
  }
  return modified;
}
683
bool EdgeFinderAndDetectablePrecedences::EdgeFinder() {
  // Initialization.
  UpdateEst();
  for (int i = 0; i < size(); ++i) {
    new_est_[i] = by_start_min_[i]->interval->StartMin();
  }

  // Push in one direction.
  std::sort(by_end_max_.begin(), by_end_max_.end(),
            EndMaxLessThan<DisjunctiveTask>);
  lt_tree_.Clear();
  for (int i = 0; i < size(); ++i) {
    lt_tree_.Insert(*by_start_min_[i]);
    DCHECK_EQ(i, by_start_min_[i]->index);
  }
  // Iterate by decreasing end max, greying the task with the largest end max
  // at each step: grey tasks stay candidates (Lambda set) but no longer
  // count in the Theta quantities.
  for (int j = size() - 2; j >= 0; --j) {
    lt_tree_.Grey(*by_end_max_[j + 1]);
    DisjunctiveTask* const twj = by_end_max_[j];
    // We should have checked for overloading earlier.
    DCHECK_LE(lt_tree_.Ect(), twj->interval->EndMax());
    // While adding some grey task would overload the current window, that
    // task must come after the whole Theta set: raise its start min to the
    // set's earliest completion time, then drop it from the tree.
    while (lt_tree_.EctOpt() > twj->interval->EndMax()) {
      const int i = lt_tree_.ResponsibleOpt();
      DCHECK_GE(i, 0);
      if (lt_tree_.Ect() > new_est_[i]) {
        new_est_[i] = lt_tree_.Ect();
      }
      lt_tree_.Reset(i);
    }
  }

  // Apply modifications.
  bool modified = false;
  for (int i = 0; i < size(); ++i) {
    IntervalVar* const var = by_start_min_[i]->interval;
    // Zero-duration tasks are only tightened in strict mode.
    if (var->StartMin() < new_est_[i] && (strict_ || var->DurationMin() > 0)) {
      modified = true;
      var->SetStartMin(new_est_[i]);
    }
  }
  return modified;
}
725
726 // --------- Disjunctive Constraint ----------
727
728 // ----- Propagation on ranked activities -----
729
730 class RankedPropagator : public Constraint {
731 public:
RankedPropagator(Solver * const solver,const std::vector<IntVar * > & nexts,const std::vector<IntervalVar * > & intervals,const std::vector<IntVar * > & slacks,DisjunctiveConstraint * const disjunctive)732 RankedPropagator(Solver* const solver, const std::vector<IntVar*>& nexts,
733 const std::vector<IntervalVar*>& intervals,
734 const std::vector<IntVar*>& slacks,
735 DisjunctiveConstraint* const disjunctive)
736 : Constraint(solver),
737 nexts_(nexts),
738 intervals_(intervals),
739 slacks_(slacks),
740 disjunctive_(disjunctive),
741 partial_sequence_(intervals.size()),
742 previous_(intervals.size() + 2, 0) {}
743
~RankedPropagator()744 ~RankedPropagator() override {}
745
Post()746 void Post() override {
747 Demon* const delayed =
748 solver()->MakeDelayedConstraintInitialPropagateCallback(this);
749 for (int i = 0; i < intervals_.size(); ++i) {
750 nexts_[i]->WhenBound(delayed);
751 intervals_[i]->WhenAnything(delayed);
752 slacks_[i]->WhenRange(delayed);
753 }
754 nexts_.back()->WhenBound(delayed);
755 }
756
InitialPropagate()757 void InitialPropagate() override {
758 PropagateNexts();
759 PropagateSequence();
760 }
761
  // Walks the chain of bound nexts_ variables from both ends and turns any
  // newly bound prefix/suffix into RankFirst/RankLast decisions on
  // partial_sequence_.
  //
  // Index convention: nexts_ uses 1-based activity indices (node 0 is the
  // start sentinel) while partial_sequence_ uses 0-based interval indices,
  // hence the 'first - 1' / 'last - 1' translations below.
  void PropagateNexts() {
    Solver* const s = solver();
    const int ranked_first = partial_sequence_.NumFirstRanked();
    const int ranked_last = partial_sequence_.NumLastRanked();
    // Node just past the front chain: reaching it means we have met the
    // already-ranked tail and there is nothing left to deduce.
    const int sentinel =
        ranked_last == 0
            ? nexts_.size()
            : partial_sequence_[intervals_.size() - ranked_last] + 1;
    int first = 0;
    int counter = 0;
    // Follow the bound chain from the start node; every node beyond the
    // ranked_first already-ranked ones becomes a new RankFirst decision.
    while (nexts_[first]->Bound()) {
      DCHECK_NE(first, nexts_[first]->Min());
      first = nexts_[first]->Min();
      if (first == sentinel) {
        return;
      }
      if (++counter > ranked_first) {
        DCHECK(intervals_[first - 1]->MayBePerformed());
        partial_sequence_.RankFirst(s, first - 1);
        VLOG(2) << "RankFirst " << first - 1 << " -> "
                << partial_sequence_.DebugString();
      }
    }
    // Build the reverse chain: previous_[j] = i iff nexts_[i] is bound to j.
    previous_.assign(previous_.size(), -1);
    for (int i = 0; i < nexts_.size(); ++i) {
      if (nexts_[i]->Bound()) {
        previous_[nexts_[i]->Min()] = i;
      }
    }
    // Follow the reverse chain from the end node; every node beyond the
    // ranked_last already-ranked ones becomes a new RankLast decision.
    int last = previous_.size() - 1;
    counter = 0;
    while (previous_[last] != -1) {
      last = previous_[last];
      if (++counter > ranked_last) {
        partial_sequence_.RankLast(s, last - 1);
        VLOG(2) << "RankLast " << last - 1 << " -> "
                << partial_sequence_.DebugString();
      }
    }
  }
802
  // Propagates start-time bounds along the partially ranked sequence:
  //   1) chains bounds through the ranked-first prefix (left to right),
  //   2) chains bounds through the ranked-last suffix (right to left),
  //   3) propagates between the two sentinel intervals and each unranked
  //      middle interval (both directions when the middle one is performed),
  //   4) re-chains both ranked parts in the opposite directions.
  void PropagateSequence() {
    const int last_position = intervals_.size() - 1;
    // first_sentinel is the first unranked position; last_sentinel the last.
    const int first_sentinel = partial_sequence_.NumFirstRanked();
    const int last_sentinel = last_position - partial_sequence_.NumLastRanked();
    // Propagates on ranked first from left to right.
    for (int i = 0; i < first_sentinel - 1; ++i) {
      IntervalVar* const interval = RankedInterval(i);
      IntervalVar* const next_interval = RankedInterval(i + 1);
      IntVar* const slack = RankedSlack(i);
      const int64_t transition_time = RankedTransitionTime(i, i + 1);
      // start(next) = start(current) + slack(current) + transition.
      next_interval->SetStartRange(
          CapAdd(interval->StartMin(), CapAdd(slack->Min(), transition_time)),
          CapAdd(interval->StartMax(), CapAdd(slack->Max(), transition_time)));
    }
    // Propagates on ranked last from right to left.
    for (int i = last_position; i > last_sentinel + 1; --i) {
      IntervalVar* const interval = RankedInterval(i - 1);
      IntervalVar* const next_interval = RankedInterval(i);
      IntVar* const slack = RankedSlack(i - 1);
      const int64_t transition_time = RankedTransitionTime(i - 1, i);
      interval->SetStartRange(CapSub(next_interval->StartMin(),
                                     CapAdd(slack->Max(), transition_time)),
                              CapSub(next_interval->StartMax(),
                                     CapAdd(slack->Min(), transition_time)));
    }
    // Propagate across.
    IntervalVar* const first_interval =
        first_sentinel > 0 ? RankedInterval(first_sentinel - 1) : nullptr;
    IntVar* const first_slack =
        first_sentinel > 0 ? RankedSlack(first_sentinel - 1) : nullptr;
    IntervalVar* const last_interval = last_sentinel < last_position
                                           ? RankedInterval(last_sentinel + 1)
                                           : nullptr;

    // Nothing to do afterwards, exiting.
    if (first_interval == nullptr && last_interval == nullptr) {
      return;
    }
    // Propagates to the middle part.
    // This assumes triangular inequality in the transition times.
    for (int i = first_sentinel; i <= last_sentinel; ++i) {
      IntervalVar* const interval = RankedInterval(i);
      IntVar* const slack = RankedSlack(i);
      if (interval->MayBePerformed()) {
        const bool performed = interval->MustBePerformed();
        if (first_interval != nullptr) {
          // Bound the middle interval against the last ranked-first interval.
          const int64_t transition_time =
              RankedTransitionTime(first_sentinel - 1, i);
          interval->SetStartRange(
              CapAdd(first_interval->StartMin(),
                     CapAdd(first_slack->Min(), transition_time)),
              CapAdd(first_interval->StartMax(),
                     CapAdd(first_slack->Max(), transition_time)));
          if (performed) {
            // Back-propagation is only valid when 'interval' must be
            // performed (an optional interval could be dropped instead).
            first_interval->SetStartRange(
                CapSub(interval->StartMin(),
                       CapAdd(first_slack->Max(), transition_time)),
                CapSub(interval->StartMax(),
                       CapAdd(first_slack->Min(), transition_time)));
          }
        }
        if (last_interval != nullptr) {
          // Bound the middle interval against the first ranked-last interval.
          const int64_t transition_time =
              RankedTransitionTime(i, last_sentinel + 1);
          interval->SetStartRange(
              CapSub(last_interval->StartMin(),
                     CapAdd(slack->Max(), transition_time)),
              CapSub(last_interval->StartMax(),
                     CapAdd(slack->Min(), transition_time)));
          if (performed) {
            last_interval->SetStartRange(
                CapAdd(interval->StartMin(),
                       CapAdd(slack->Min(), transition_time)),
                CapAdd(interval->StartMax(),
                       CapAdd(slack->Max(), transition_time)));
          }
        }
      }
    }
    // TODO(user): cache transition on ranked intervals in a vector.
    // Propagates on ranked first from right to left.
    for (int i = std::min(first_sentinel - 2, last_position - 1); i >= 0; --i) {
      IntervalVar* const interval = RankedInterval(i);
      IntervalVar* const next_interval = RankedInterval(i + 1);
      IntVar* const slack = RankedSlack(i);
      const int64_t transition_time = RankedTransitionTime(i, i + 1);
      interval->SetStartRange(CapSub(next_interval->StartMin(),
                                     CapAdd(slack->Max(), transition_time)),
                              CapSub(next_interval->StartMax(),
                                     CapAdd(slack->Min(), transition_time)));
    }
    // Propagates on ranked last from left to right.
    // NOTE(review): this loop stops at last_position - 1 (exclusive) and so
    // never propagates over the final pair, unlike the symmetric loop on the
    // ranked-first prefix above. Weaker pruning only, not incorrectness —
    // confirm it is intentional.
    for (int i = last_sentinel + 1; i < last_position - 1; ++i) {
      IntervalVar* const interval = RankedInterval(i);
      IntervalVar* const next_interval = RankedInterval(i + 1);
      IntVar* const slack = RankedSlack(i);
      const int64_t transition_time = RankedTransitionTime(i, i + 1);
      next_interval->SetStartRange(
          CapAdd(interval->StartMin(), CapAdd(slack->Min(), transition_time)),
          CapAdd(interval->StartMax(), CapAdd(slack->Max(), transition_time)));
    }
    // TODO(user) : Propagate on slacks.
  }
906
RankedInterval(int i) const907 IntervalVar* RankedInterval(int i) const {
908 const int index = partial_sequence_[i];
909 return intervals_[index];
910 }
911
RankedSlack(int i) const912 IntVar* RankedSlack(int i) const {
913 const int index = partial_sequence_[i];
914 return slacks_[index];
915 }
916
RankedTransitionTime(int before,int after) const917 int64_t RankedTransitionTime(int before, int after) const {
918 const int before_index = partial_sequence_[before];
919 const int after_index = partial_sequence_[after];
920
921 return disjunctive_->TransitionTime(before_index, after_index);
922 }
923
DebugString() const924 std::string DebugString() const override {
925 return absl::StrFormat(
926 "RankedPropagator([%s], nexts = [%s], intervals = [%s])",
927 partial_sequence_.DebugString(), JoinDebugStringPtr(nexts_, ", "),
928 JoinDebugStringPtr(intervals_, ", "));
929 }
930
  // Visitor support is deliberately unimplemented for this internal
  // propagator (see the FATAL below); it is never exported on its own.
  void Accept(ModelVisitor* const visitor) const override {
    LOG(FATAL) << "Not yet implemented";
    // TODO(user): IMPLEMENT ME.
  }
935
 private:
  // 'Next' variables: node 0 is the start sentinel, node i (i >= 1)
  // corresponds to intervals_[i - 1].
  std::vector<IntVar*> nexts_;
  std::vector<IntervalVar*> intervals_;
  // One slack variable per interval, same order as intervals_.
  std::vector<IntVar*> slacks_;
  DisjunctiveConstraint* const disjunctive_;
  // Reversible record of the ranked-first prefix / ranked-last suffix.
  RevPartialSequence partial_sequence_;
  // Scratch reverse chain: previous_[j] == i iff nexts_[i] is bound to j.
  std::vector<int> previous_;
};
944
945 // A class that stores several propagators for the sequence constraint, and
946 // calls them until a fixpoint is reached.
947
// Aggregates the edge-finding, not-last and detectable-precedences
// propagators on both the straight and the time-mirrored problem, and runs
// them to a fixpoint. Also lazily builds the 'next'/cumul routing model used
// to expose a SequenceVar for ranking decisions.
class FullDisjunctiveConstraint : public DisjunctiveConstraint {
 public:
  FullDisjunctiveConstraint(Solver* const s,
                            const std::vector<IntervalVar*>& intervals,
                            const std::string& name, bool strict)
      : DisjunctiveConstraint(s, intervals, name),
        sequence_var_(nullptr),
        straight_(s, intervals, false, strict),
        mirror_(s, intervals, true, strict),
        straight_not_last_(s, intervals, false, strict),
        mirror_not_last_(s, intervals, true, strict),
        strict_(strict) {}

  ~FullDisjunctiveConstraint() override {}

  void Post() override {
    // Propagation is not incremental: a single delayed demon re-runs
    // InitialPropagate() whenever any interval changes.
    Demon* const d = MakeDelayedConstraintDemon0(
        solver(), this, &FullDisjunctiveConstraint::InitialPropagate,
        "InitialPropagate");
    for (int32_t i = 0; i < straight_.size(); ++i) {
      straight_.interval(i)->WhenAnything(d);
    }
  }

  void InitialPropagate() override {
    // With no interval that must be performed, no ordering can be deduced.
    bool all_optional_or_unperformed = true;
    for (const IntervalVar* const interval : intervals_) {
      if (interval->MustBePerformed()) {
        all_optional_or_unperformed = false;
        break;
      }
    }
    if (all_optional_or_unperformed) {  // Nothing to deduce
      return;
    }

    // Detect whether every may-be-performed interval is completely fixed in
    // time; if so, a cheap sweep replaces the energetic propagators.
    bool all_times_fixed = true;
    for (const IntervalVar* const interval : intervals_) {
      if (interval->MayBePerformed() &&
          (interval->StartMin() != interval->StartMax() ||
           interval->DurationMin() != interval->DurationMax() ||
           interval->EndMin() != interval->EndMax())) {
        all_times_fixed = false;
        break;
      }
    }

    if (all_times_fixed) {
      PropagatePerformed();
    } else {
      // Fixpoint loop: cheapest rules innermost, each outer level re-enters
      // the inner ones as long as some propagator reports a modification.
      do {
        do {
          do {
            // OverloadChecking is symmetrical. It has the same effect on the
            // straight and the mirrored version.
            straight_.OverloadChecking();
          } while (straight_.DetectablePrecedences() ||
                   mirror_.DetectablePrecedences());
        } while (straight_not_last_.Propagate() ||
                 mirror_not_last_.Propagate());
      } while (straight_.EdgeFinder() || mirror_.EdgeFinder());
    }
  }

  // Returns true if the [StartMin, EndMax) spans of the two intervals
  // overlap.
  bool Intersect(IntervalVar* const i1, IntervalVar* const i2) const {
    return i1->StartMin() < i2->EndMax() && i2->StartMin() < i1->EndMax();
  }

  // Sweep used when all times are fixed: fails if two performed intervals
  // overlap, and unperforms optional intervals that would overlap a
  // performed one.
  void PropagatePerformed() {
    performed_.clear();
    optional_.clear();
    for (IntervalVar* const interval : intervals_) {
      if (interval->MustBePerformed()) {
        performed_.push_back(interval);
      } else if (interval->MayBePerformed()) {
        optional_.push_back(interval);
      }
    }
    // Checks feasibility of performed intervals.
    if (performed_.empty()) return;
    std::sort(performed_.begin(), performed_.end(), IntervalStartMinLessThan);
    for (int i = 0; i < performed_.size() - 1; ++i) {
      if (performed_[i]->EndMax() > performed_[i + 1]->StartMin()) {
        solver()->Fail();
      }
    }

    // Checks if optional intervals can be inserted.
    if (optional_.empty()) return;
    int index = 0;
    const int num_performed = performed_.size();
    std::sort(optional_.begin(), optional_.end(), IntervalStartMinLessThan);
    for (IntervalVar* const candidate : optional_) {
      const int64_t start = candidate->StartMin();
      // Skip performed intervals that end before 'candidate' can start.
      while (index < num_performed && start >= performed_[index]->EndMax()) {
        index++;
      }
      if (index == num_performed) return;
      if (Intersect(candidate, performed_[index]) ||
          (index < num_performed - 1 &&
           Intersect(candidate, performed_[index + 1]))) {
        candidate->SetPerformed(false);
      }
    }
  }

  void Accept(ModelVisitor* const visitor) const override {
    visitor->BeginVisitConstraint(ModelVisitor::kDisjunctive, this);
    visitor->VisitIntervalArrayArgument(ModelVisitor::kIntervalsArgument,
                                        intervals_);
    if (sequence_var_ != nullptr) {
      visitor->VisitSequenceArgument(ModelVisitor::kSequenceArgument,
                                     sequence_var_);
    }
    visitor->EndVisitConstraint(ModelVisitor::kDisjunctive, this);
  }

  // Lazily and reversibly creates the SequenceVar exposing ranking decisions
  // on this disjunctive resource.
  SequenceVar* MakeSequenceVar() override {
    BuildNextModelIfNeeded();
    if (sequence_var_ == nullptr) {
      solver()->SaveValue(reinterpret_cast<void**>(&sequence_var_));
      sequence_var_ = solver()->RevAlloc(
          new SequenceVar(solver(), intervals_, nexts_, name()));
    }
    return sequence_var_;
  }

  std::string DebugString() const override {
    return absl::StrFormat("FullDisjunctiveConstraint([%s], %i)",
                           JoinDebugStringPtr(intervals_, ", "), strict_);
  }

  // Accessors for the lazily built 'next' model; the vectors stay empty
  // until BuildNextModelIfNeeded() has run (via MakeSequenceVar()).
  const std::vector<IntVar*>& nexts() const override { return nexts_; }

  const std::vector<IntVar*>& actives() const override { return actives_; }

  const std::vector<IntVar*>& time_cumuls() const override {
    return time_cumuls_;
  }

  const std::vector<IntVar*>& time_slacks() const override {
    return time_slacks_;
  }

 private:
  // Transition time between two nodes of the 'next' model. Node indices are
  // shifted by one (node 0 is the start sentinel, node size()+1 the end
  // sentinel), hence the '- 1' translations.
  int64_t Distance(int64_t activity_plus_one, int64_t next_activity_plus_one) {
    return (activity_plus_one == 0 ||
            next_activity_plus_one > intervals_.size())
               ? 0
               : transition_time_(activity_plus_one - 1,
                                  next_activity_plus_one - 1);
  }

  // Builds, once, the routing-style model (nexts, actives, time cumuls and
  // slacks) used by the SequenceVar and the RankedPropagator.
  void BuildNextModelIfNeeded() {
    if (!nexts_.empty()) {
      return;
    }
    Solver* const s = solver();
    const std::string& ct_name = name();
    const int num_intervals = intervals_.size();
    const int num_nodes = intervals_.size() + 1;
    int64_t horizon = 0;
    for (int i = 0; i < intervals_.size(); ++i) {
      if (intervals_[i]->MayBePerformed()) {
        horizon = std::max(horizon, intervals_[i]->EndMax());
      }
    }

    // Create the next model.
    s->MakeIntVarArray(num_nodes, 1, num_nodes, ct_name + "_nexts", &nexts_);
    // Alldifferent on the nexts variable (the equivalent problem is a tsp).
    s->AddConstraint(s->MakeAllDifferent(nexts_));

    actives_.resize(num_nodes);
    for (int i = 0; i < num_intervals; ++i) {
      actives_[i + 1] = intervals_[i]->PerformedExpr()->Var();
      // A node is active iff its next is not itself.
      s->AddConstraint(
          s->MakeIsDifferentCstCt(nexts_[i + 1], i + 1, actives_[i + 1]));
    }
    std::vector<IntVar*> short_actives(actives_.begin() + 1, actives_.end());
    actives_[0] = s->MakeMax(short_actives)->Var();

    // No Cycle on the corresponding tsp.
    s->AddConstraint(s->MakeNoCycle(nexts_, actives_));

    // Cumul on time.
    time_cumuls_.resize(num_nodes + 1);
    // Slacks between activities.
    time_slacks_.resize(num_nodes);

    time_slacks_[0] = s->MakeIntVar(0, horizon, "initial_slack");
    // TODO(user): check this.
    time_cumuls_[0] = s->MakeIntConst(0);

    for (int64_t i = 0; i < num_intervals; ++i) {
      IntervalVar* const var = intervals_[i];
      if (var->MayBePerformed()) {
        const int64_t duration_min = var->DurationMin();
        time_slacks_[i + 1] = s->MakeIntVar(
            duration_min, horizon, absl::StrFormat("time_slacks(%d)", i + 1));
        // TODO(user): Check SafeStartExpr();
        time_cumuls_[i + 1] = var->SafeStartExpr(var->StartMin())->Var();
        if (var->DurationMax() != duration_min) {
          s->AddConstraint(s->MakeGreaterOrEqual(
              time_slacks_[i + 1], var->SafeDurationExpr(duration_min)));
        }
      } else {
        // Unperformed intervals are parked at the horizon with a free slack.
        time_slacks_[i + 1] = s->MakeIntVar(
            0, horizon, absl::StrFormat("time_slacks(%d)", i + 1));
        time_cumuls_[i + 1] = s->MakeIntConst(horizon);
      }
    }
    // TODO(user): Find a better UB for the last time cumul.
    time_cumuls_[num_nodes] = s->MakeIntVar(0, 2 * horizon, ct_name + "_ect");
    s->AddConstraint(s->MakePathCumul(
        nexts_, actives_, time_cumuls_, time_slacks_,
        [this](int64_t x, int64_t y) { return Distance(x, y); }));

    std::vector<IntVar*> short_slacks(time_slacks_.begin() + 1,
                                      time_slacks_.end());
    s->AddConstraint(s->RevAlloc(
        new RankedPropagator(s, nexts_, intervals_, short_slacks, this)));
  }

  SequenceVar* sequence_var_;
  EdgeFinderAndDetectablePrecedences straight_;
  EdgeFinderAndDetectablePrecedences mirror_;
  NotLast straight_not_last_;
  NotLast mirror_not_last_;
  std::vector<IntVar*> nexts_;
  std::vector<IntVar*> actives_;
  std::vector<IntVar*> time_cumuls_;
  std::vector<IntVar*> time_slacks_;
  std::vector<IntervalVar*> performed_;
  std::vector<IntervalVar*> optional_;
  const bool strict_;
  DISALLOW_COPY_AND_ASSIGN(FullDisjunctiveConstraint);
};
1186
1187 // =====================================================================
1188 // Cumulative
1189 // =====================================================================
1190
1191 // A cumulative Theta node, where two energies, corresponding to 2 capacities,
1192 // are stored.
1193 struct DualCapacityThetaNode {
1194 // Special value for task indices meaning 'no such task'.
1195 static const int kNone;
1196
1197 // Identity constructor
DualCapacityThetaNodeoperations_research::__anon726a53860111::DualCapacityThetaNode1198 DualCapacityThetaNode()
1199 : energy(0LL),
1200 energetic_end_min(std::numeric_limits<int64_t>::min()),
1201 residual_energetic_end_min(std::numeric_limits<int64_t>::min()) {}
1202
1203 // Constructor for a single cumulative task in the Theta set.
DualCapacityThetaNodeoperations_research::__anon726a53860111::DualCapacityThetaNode1204 DualCapacityThetaNode(int64_t capacity, int64_t residual_capacity,
1205 const CumulativeTask& task)
1206 : energy(task.EnergyMin()),
1207 energetic_end_min(CapAdd(capacity * task.interval->StartMin(), energy)),
1208 residual_energetic_end_min(
1209 CapAdd(residual_capacity * task.interval->StartMin(), energy)) {}
1210
1211 // Constructor for a single variable cumulative task in the Theta set.
DualCapacityThetaNodeoperations_research::__anon726a53860111::DualCapacityThetaNode1212 DualCapacityThetaNode(int64_t capacity, int64_t residual_capacity,
1213 const VariableCumulativeTask& task)
1214 : energy(task.EnergyMin()),
1215 energetic_end_min(CapAdd(capacity * task.interval->StartMin(), energy)),
1216 residual_energetic_end_min(
1217 CapAdd(residual_capacity * task.interval->StartMin(), energy)) {}
1218
1219 // Sets this DualCapacityThetaNode to the result of the natural binary
1220 // operation over the two given operands, corresponding to the following set
1221 // operation: Theta = left.Theta union right.Theta
1222 //
1223 // No set operation actually occur: we only maintain the relevant quantities
1224 // associated with such sets.
Computeoperations_research::__anon726a53860111::DualCapacityThetaNode1225 void Compute(const DualCapacityThetaNode& left,
1226 const DualCapacityThetaNode& right) {
1227 energy = CapAdd(left.energy, right.energy);
1228 energetic_end_min = std::max(CapAdd(left.energetic_end_min, right.energy),
1229 right.energetic_end_min);
1230 residual_energetic_end_min =
1231 std::max(CapAdd(left.residual_energetic_end_min, right.energy),
1232 right.residual_energetic_end_min);
1233 }
1234
1235 // Amount of resource consumed by the Theta set, in units of demand X time.
1236 // This is energy(Theta).
1237 int64_t energy;
1238
1239 // Max_{subset S of Theta} (capacity * start_min(S) + energy(S))
1240 int64_t energetic_end_min;
1241
1242 // Max_{subset S of Theta} (residual_capacity * start_min(S) + energy(S))
1243 int64_t residual_energetic_end_min;
1244 };
1245
// Out-of-class definition of the 'no such task' sentinel.
const int DualCapacityThetaNode::kNone = -1;
1247
1248 // A tree for dual capacity theta nodes
1249 class DualCapacityThetaTree
1250 : public MonoidOperationTree<DualCapacityThetaNode> {
1251 public:
1252 static const int64_t kNotInitialized;
1253
DualCapacityThetaTree(int size)1254 explicit DualCapacityThetaTree(int size)
1255 : MonoidOperationTree<DualCapacityThetaNode>(size),
1256 capacity_max_(-1),
1257 residual_capacity_(-1) {}
1258
~DualCapacityThetaTree()1259 virtual ~DualCapacityThetaTree() {}
1260
Init(int64_t capacity_max,int64_t residual_capacity)1261 void Init(int64_t capacity_max, int64_t residual_capacity) {
1262 DCHECK_LE(0, residual_capacity);
1263 DCHECK_LE(residual_capacity, capacity_max);
1264 Clear();
1265 capacity_max_ = capacity_max;
1266 residual_capacity_ = residual_capacity;
1267 }
1268
Insert(const CumulativeTask * task)1269 void Insert(const CumulativeTask* task) {
1270 Set(task->index,
1271 DualCapacityThetaNode(capacity_max_, residual_capacity_, *task));
1272 }
1273
Insert(const VariableCumulativeTask * task)1274 void Insert(const VariableCumulativeTask* task) {
1275 Set(task->index,
1276 DualCapacityThetaNode(capacity_max_, residual_capacity_, *task));
1277 }
1278
1279 private:
1280 int64_t capacity_max_;
1281 int64_t residual_capacity_;
1282 DISALLOW_COPY_AND_ASSIGN(DualCapacityThetaTree);
1283 };
1284
// Out-of-class definition of the 'capacities not yet set' sentinel.
const int64_t DualCapacityThetaTree::kNotInitialized = -1LL;
1286
1287 // An object that can dive down a branch of a DualCapacityThetaTree to compute
1288 // Env(j, c) in Petr Vilim's notations.
1289 //
1290 // In 'Edge finding filtering algorithm for discrete cumulative resources in
1291 // O(kn log n)' by Petr Vilim, this corresponds to line 6--8 in algorithm 1.3,
1292 // plus all of algorithm 1.2.
1293 //
1294 // http://vilim.eu/petr/cp2009.pdf
1295 // Note: use the version pointed to by this pointer, not the version from the
1296 // conference proceedings, which has a few errors.
1297 class EnvJCComputeDiver {
1298 public:
1299 static const int64_t kNotAvailable;
EnvJCComputeDiver(int energy_threshold)1300 explicit EnvJCComputeDiver(int energy_threshold)
1301 : energy_threshold_(energy_threshold),
1302 energy_alpha_(kNotAvailable),
1303 energetic_end_min_alpha_(kNotAvailable) {}
OnArgumentReached(int index,const DualCapacityThetaNode & argument)1304 void OnArgumentReached(int index, const DualCapacityThetaNode& argument) {
1305 energy_alpha_ = argument.energy;
1306 energetic_end_min_alpha_ = argument.energetic_end_min;
1307 // We should reach a leaf that is not the identity
1308 // DCHECK_GT(energetic_end_min_alpha_, kint64min);
1309 // TODO(user): Check me.
1310 }
ChooseGoLeft(const DualCapacityThetaNode & current,const DualCapacityThetaNode & left_child,const DualCapacityThetaNode & right_child)1311 bool ChooseGoLeft(const DualCapacityThetaNode& current,
1312 const DualCapacityThetaNode& left_child,
1313 const DualCapacityThetaNode& right_child) {
1314 if (right_child.residual_energetic_end_min > energy_threshold_) {
1315 return false; // enough energy on right
1316 } else {
1317 energy_threshold_ -= right_child.energy;
1318 return true;
1319 }
1320 }
OnComeBackFromLeft(const DualCapacityThetaNode & current,const DualCapacityThetaNode & left_child,const DualCapacityThetaNode & right_child)1321 void OnComeBackFromLeft(const DualCapacityThetaNode& current,
1322 const DualCapacityThetaNode& left_child,
1323 const DualCapacityThetaNode& right_child) {
1324 // The left subtree intersects the alpha set.
1325 // The right subtree does not intersect the alpha set.
1326 // The energy_alpha_ and energetic_end_min_alpha_ previously
1327 // computed are valid for this node too: there's nothing to do.
1328 }
OnComeBackFromRight(const DualCapacityThetaNode & current,const DualCapacityThetaNode & left_child,const DualCapacityThetaNode & right_child)1329 void OnComeBackFromRight(const DualCapacityThetaNode& current,
1330 const DualCapacityThetaNode& left_child,
1331 const DualCapacityThetaNode& right_child) {
1332 // The left subtree is included in the alpha set.
1333 // The right subtree intersects the alpha set.
1334 energetic_end_min_alpha_ =
1335 std::max(energetic_end_min_alpha_,
1336 CapAdd(left_child.energetic_end_min, energy_alpha_));
1337 energy_alpha_ += left_child.energy;
1338 }
GetEnvJC(const DualCapacityThetaNode & root) const1339 int64_t GetEnvJC(const DualCapacityThetaNode& root) const {
1340 const int64_t energy = root.energy;
1341 const int64_t energy_beta = CapSub(energy, energy_alpha_);
1342 return CapAdd(energetic_end_min_alpha_, energy_beta);
1343 }
1344
1345 private:
1346 // Energy threshold such that if a set has an energetic_end_min greater than
1347 // the threshold, then it can push tasks that must end at or after the
1348 // currently considered end max.
1349 //
1350 // Used when diving down only.
1351 int64_t energy_threshold_;
1352
1353 // Energy of the alpha set, that is, the set of tasks whose start min does not
1354 // exceed the max start min of a set with excess residual energy.
1355 //
1356 // Used when swimming up only.
1357 int64_t energy_alpha_;
1358
1359 // Energetic end min of the alpha set.
1360 //
1361 // Used when swimming up only.
1362 int64_t energetic_end_min_alpha_;
1363 };
1364
// Out-of-class definition of the 'value not yet computed' sentinel.
const int64_t EnvJCComputeDiver::kNotAvailable = -1LL;
1366
1367 // In all the following, the term 'update' means 'a potential new start min for
1368 // a task'. The edge-finding algorithm is in two phase: one compute potential
1369 // new start mins, the other detects whether they are applicable or not for each
1370 // task.
1371
1372 // Collection of all updates (i.e., potential new start mins) for a given value
1373 // of the demand.
1374 class UpdatesForADemand {
1375 public:
UpdatesForADemand(int size)1376 explicit UpdatesForADemand(int size)
1377 : updates_(size, 0), up_to_date_(false) {}
1378
Update(int index)1379 const int64_t Update(int index) { return updates_[index]; }
Reset()1380 void Reset() { up_to_date_ = false; }
SetUpdate(int index,int64_t update)1381 void SetUpdate(int index, int64_t update) {
1382 DCHECK(!up_to_date_);
1383 DCHECK_LT(index, updates_.size());
1384 updates_[index] = update;
1385 }
up_to_date() const1386 bool up_to_date() const { return up_to_date_; }
set_up_to_date()1387 void set_up_to_date() { up_to_date_ = true; }
1388
1389 private:
1390 std::vector<int64_t> updates_;
1391 bool up_to_date_;
1392 DISALLOW_COPY_AND_ASSIGN(UpdatesForADemand);
1393 };
1394
1395 // One-sided cumulative edge finder.
1396 template <class Task>
1397 class EdgeFinder : public Constraint {
1398 public:
  // Builds an edge finder over 'tasks' sharing the 'capacity' variable.
  // Takes ownership of the task objects (released in the destructor).
  EdgeFinder(Solver* const solver, const std::vector<Task*>& tasks,
             IntVar* const capacity)
      : Constraint(solver),
        capacity_(capacity),
        tasks_(tasks),
        by_start_min_(tasks.size()),
        by_end_max_(tasks.size()),
        by_end_min_(tasks.size()),
        // NOTE(review): this initializer reads capacity_, which relies on
        // capacity_ being declared before lt_tree_ — confirm in the member
        // declarations (not visible in this chunk).
        lt_tree_(tasks.size(), capacity_->Max()),
        dual_capacity_tree_(tasks.size()),
        has_zero_demand_tasks_(true) {}
1410
  // The edge finder owns its tasks and the lazily created per-demand update
  // caches (update_map_ values).
  ~EdgeFinder() override {
    gtl::STLDeleteElements(&tasks_);
    gtl::STLDeleteValues(&update_map_);
  }
1415
  // Attaches one delayed demon to every task and to the capacity variable.
  void Post() override {
    // Add the demons
    Demon* const demon = MakeDelayedConstraintDemon0(
        solver(), this, &EdgeFinder::InitialPropagate, "RangeChanged");
    for (Task* const task : tasks_) {
      // Delay propagation, as this constraint is not incremental: we pay
      // O(n log n) each time the constraint is awakened.
      task->WhenAnything(demon);
    }
    capacity_->WhenRange(demon);
  }
1427
1428 // The propagation algorithms: checks for overloading, computes new start mins
1429 // according to the edge-finding rules, and applies them.
  void InitialPropagate() override {
    InitPropagation();                          // Sort views, reset caches.
    PropagateBasedOnEndMinGreaterThanEndMax();  // End-order based updates.
    FillInTree();                               // Also checks for overload.
    PropagateBasedOnEnergy();                   // Edge-finding detection.
    ApplyNewBounds();                           // Commit collected updates.
  }
1437
  // Internal building block: never exported to model visitors by design
  // (see the FATAL below).
  void Accept(ModelVisitor* const visitor) const override {
    LOG(FATAL) << "Should Not Be Visited";
  }
1441
  // This constraint has no printable parameters.
  std::string DebugString() const override { return "EdgeFinder"; }
1443
1444 private:
GetOrMakeUpdate(int64_t demand_min)1445 UpdatesForADemand* GetOrMakeUpdate(int64_t demand_min) {
1446 UpdatesForADemand* update = gtl::FindPtrOrNull(update_map_, demand_min);
1447 if (update == nullptr) {
1448 update = new UpdatesForADemand(tasks_.size());
1449 update_map_[demand_min] = update;
1450 }
1451 return update;
1452 }
1453
1454 // Sets the fields in a proper state to run the propagation algorithm.
  // Sets the fields in a proper state to run the propagation algorithm.
  void InitPropagation() {
    // Clear the update stack
    start_min_update_.clear();
    // Re_init vectors if has_zero_demand_tasks_ is true
    if (has_zero_demand_tasks_.Value()) {
      by_start_min_.clear();
      by_end_min_.clear();
      by_end_max_.clear();
      // Only populate tasks with demand_min > 0.
      bool zero_demand = false;
      for (Task* const task : tasks_) {
        if (task->DemandMin() > 0) {
          by_start_min_.push_back(task);
          by_end_min_.push_back(task);
          by_end_max_.push_back(task);
        } else {
          zero_demand = true;
        }
      }
      // Once no zero-demand task remains, the filtering above is stable and
      // need not be redone; recorded reversibly so backtracking restores it.
      if (!zero_demand) {
        has_zero_demand_tasks_.SetValue(solver(), false);
      }
    }

    // sort by start min.
    std::sort(by_start_min_.begin(), by_start_min_.end(),
              StartMinLessThan<Task>);
    // task->index is the task's leaf position, reused by the trees below.
    for (int i = 0; i < by_start_min_.size(); ++i) {
      by_start_min_[i]->index = i;
    }
    // Sort by end max.
    std::sort(by_end_max_.begin(), by_end_max_.end(), EndMaxLessThan<Task>);
    // Sort by end min.
    std::sort(by_end_min_.begin(), by_end_min_.end(), EndMinLessThan<Task>);
    // Initialize the tree with the new capacity.
    lt_tree_.Init(capacity_->Max());
    // Clear updates
    for (const auto& entry : update_map_) {
      entry.second->Reset();
    }
  }
1496
1497 // Computes all possible update values for tasks of given demand, and stores
1498 // these values in update_map_[demand].
1499 // Runs in O(n log n).
1500 // This corresponds to lines 2--13 in algorithm 1.3 in Petr Vilim's paper.
  void ComputeConditionalStartMins(UpdatesForADemand* updates,
                                   int64_t demand_min) {
    DCHECK_GT(demand_min, 0);
    DCHECK(updates != nullptr);
    const int64_t capacity_max = capacity_->Max();
    const int64_t residual_capacity = CapSub(capacity_max, demand_min);
    dual_capacity_tree_.Init(capacity_max, residual_capacity);
    // It's important to initialize the update at IntervalVar::kMinValidValue
    // rather than at kInt64min, because its opposite may be used if it's a
    // mirror variable, and
    // -kInt64min = -(-kInt64max - 1) = kInt64max + 1 = -kInt64min
    int64_t update = IntervalVar::kMinValidValue;
    // Tasks are inserted in increasing end-max order; updates are
    // nondecreasing along that order.
    for (int i = 0; i < by_end_max_.size(); ++i) {
      Task* const task = by_end_max_[i];
      if (task->EnergyMin() == 0) continue;
      const int64_t current_end_max = task->interval->EndMax();
      dual_capacity_tree_.Insert(task);
      // NOTE(review): plain multiply, unlike the CapAdd/CapSub usage around
      // it — assumes residual_capacity * end_max fits in int64; confirm
      // against the model's horizon bounds.
      const int64_t energy_threshold = residual_capacity * current_end_max;
      const DualCapacityThetaNode& root = dual_capacity_tree_.result();
      const int64_t res_energetic_end_min = root.residual_energetic_end_min;
      // Excess residual energy: compute Env(j, c) and derive the earliest
      // start compatible with the tasks inserted so far.
      if (res_energetic_end_min > energy_threshold) {
        EnvJCComputeDiver diver(energy_threshold);
        dual_capacity_tree_.DiveInTree(&diver);
        const int64_t enjv = diver.GetEnvJC(dual_capacity_tree_.result());
        const int64_t numerator = CapSub(enjv, energy_threshold);
        const int64_t diff = MathUtil::CeilOfRatio(numerator, demand_min);
        update = std::max(update, diff);
      }
      updates->SetUpdate(i, update);
    }
    updates->set_up_to_date();
  }
1533
1534 // Returns the new start min that can be inferred for task_to_push if it is
1535 // proved that it cannot end before by_end_max[end_max_index] does.
ConditionalStartMin(const Task & task_to_push,int end_max_index)1536 int64_t ConditionalStartMin(const Task& task_to_push, int end_max_index) {
1537 if (task_to_push.EnergyMin() == 0) {
1538 return task_to_push.interval->StartMin();
1539 }
1540 const int64_t demand_min = task_to_push.DemandMin();
1541 UpdatesForADemand* const updates = GetOrMakeUpdate(demand_min);
1542 if (!updates->up_to_date()) {
1543 ComputeConditionalStartMins(updates, demand_min);
1544 }
1545 DCHECK(updates->up_to_date());
1546 return updates->Update(end_max_index);
1547 }
1548
  // Propagates by discovering all end-after-end relationships purely based on
  // comparisons between end mins and end maxes: there is no energetic
  // reasoning here, but this allows updates that the standard edge-finding
  // detection rule misses.
  // See paragraph 6.2 in http://vilim.eu/petr/cp2009.pdf.
  void PropagateBasedOnEndMinGreaterThanEndMax() {
    int end_max_index = 0;
    int64_t max_start_min = std::numeric_limits<int64_t>::min();
    // Sweep tasks by non-decreasing end min. For each task, first absorb all
    // tasks whose end max is <= the current end min, remembering the largest
    // start min seen among them.
    for (Task* const task : by_end_min_) {
      const int64_t end_min = task->interval->EndMin();
      // NOTE(review): the bound uses by_start_min_.size(); all the by_*
      // vectors hold the same task set, so the sizes agree.
      while (end_max_index < by_start_min_.size() &&
             by_end_max_[end_max_index]->interval->EndMax() <= end_min) {
        max_start_min = std::max(
            max_start_min, by_end_max_[end_max_index]->interval->StartMin());
        ++end_max_index;
      }
      if (end_max_index > 0 && task->interval->StartMin() <= max_start_min &&
          task->interval->EndMax() > task->interval->EndMin()) {
        DCHECK_LE(by_end_max_[end_max_index - 1]->interval->EndMax(), end_min);
        // The update is valid and may be interesting:
        // * If task->StartMin() > max_start_min, then all tasks whose end_max
        //     is less than or equal to end_min have a start min that is less
        //     than task->StartMin(). In this case, any update we could
        //     compute would also be computed by the standard edge-finding
        //     rule. It's better not to compute it, then: it may not be
        //     needed.
        // * If task->EndMax() <= task->EndMin(), that means the end max is
        //     bound. In that case, 'task' itself belongs to the set of tasks
        //     that must end before end_min, which may cause the result of
        //     ConditionalStartMin(task, end_max_index - 1) not to be a valid
        //     update.
        const int64_t update = ConditionalStartMin(*task, end_max_index - 1);
        start_min_update_.push_back(std::make_pair(task->interval, update));
      }
    }
  }
1585
1586 // Fill the theta-lambda-tree, and check for overloading.
FillInTree()1587 void FillInTree() {
1588 for (Task* const task : by_end_max_) {
1589 lt_tree_.Insert(*task);
1590 // Maximum energetic end min without overload.
1591 const int64_t max_feasible =
1592 CapProd(capacity_->Max(), task->interval->EndMax());
1593 if (lt_tree_.energetic_end_min() > max_feasible) {
1594 solver()->Fail();
1595 }
1596 }
1597 }
1598
1599 // The heart of the propagation algorithm. Should be called with all tasks
1600 // being in the Theta set. It detects tasks that need to be pushed.
PropagateBasedOnEnergy()1601 void PropagateBasedOnEnergy() {
1602 for (int j = by_start_min_.size() - 2; j >= 0; --j) {
1603 lt_tree_.Grey(*by_end_max_[j + 1]);
1604 Task* const twj = by_end_max_[j];
1605 // We should have checked for overload earlier.
1606 const int64_t max_feasible =
1607 CapProd(capacity_->Max(), twj->interval->EndMax());
1608 DCHECK_LE(lt_tree_.energetic_end_min(), max_feasible);
1609 while (lt_tree_.energetic_end_min_opt() > max_feasible) {
1610 const int i = lt_tree_.argmax_energetic_end_min_opt();
1611 DCHECK_GE(i, 0);
1612 PropagateTaskCannotEndBefore(i, j);
1613 lt_tree_.Reset(i);
1614 }
1615 }
1616 }
1617
1618 // Takes into account the fact that the task of given index cannot end before
1619 // the given new end min.
PropagateTaskCannotEndBefore(int index,int end_max_index)1620 void PropagateTaskCannotEndBefore(int index, int end_max_index) {
1621 Task* const task_to_push = by_start_min_[index];
1622 const int64_t update = ConditionalStartMin(*task_to_push, end_max_index);
1623 start_min_update_.push_back(std::make_pair(task_to_push->interval, update));
1624 }
1625
1626 // Applies the previously computed updates.
ApplyNewBounds()1627 void ApplyNewBounds() {
1628 for (const std::pair<IntervalVar*, int64_t>& update : start_min_update_) {
1629 update.first->SetStartMin(update.second);
1630 }
1631 }
1632
  // Capacity of the cumulative resource.
  IntVar* const capacity_;

  // Initial vector of tasks; owns the Task objects.
  std::vector<Task*> tasks_;

  // Cumulative tasks, ordered by non-decreasing start min.
  std::vector<Task*> by_start_min_;

  // Cumulative tasks, ordered by non-decreasing end max.
  std::vector<Task*> by_end_max_;

  // Cumulative tasks, ordered by non-decreasing end min.
  std::vector<Task*> by_end_min_;

  // Cumulative theta-lambda tree.
  CumulativeLambdaThetaTree lt_tree_;

  // Needed by ComputeConditionalStartMins.
  DualCapacityThetaTree dual_capacity_tree_;

  // Stack of (interval, new start min) updates still to be applied.
  std::vector<std::pair<IntervalVar*, int64_t>> start_min_update_;

  // update_map_[d][i] is an integer such that if a task
  // whose demand is d cannot end before by_end_max_[i], then it cannot start
  // before update_map_[d][i].
  absl::flat_hash_map<int64_t, UpdatesForADemand*> update_map_;

  // Reversible flag: true while at least one task has DemandMin() == 0.
  Rev<bool> has_zero_demand_tasks_;

  DISALLOW_COPY_AND_ASSIGN(EdgeFinder);
};
1667
// One breakpoint of the resource usage profile: starting at 'time'
// (inclusive), the usage becomes what it was just before 'time', plus
// 'delta'.
//
// Example:
// The vector of ProfileDelta's
//   { t=1, d=+3}, { t=4, d=+1 }, { t=5, d=-2}, { t=8, d=-1}
// encodes the following usage profile:
//
// usage
//  4 |               ****.
//  3 |    ************   .       .
//  2 |    .          .   ************.
//  1 |    .          .   .           .
//  0 |*******----------------------------*******************-> time
//        0 1 2 3 4 5 6 7 8 9
//
// The profile is right-continuous (see
// http://en.wikipedia.org/wiki/Left-continuous#Directional_continuity)
// because task intervals are closed on the start side and open on the end
// side.
struct ProfileDelta {
  ProfileDelta(int64_t t, int64_t d) : time(t), delta(d) {}
  int64_t time;
  int64_t delta;
};
1694
TimeLessThan(const ProfileDelta & delta1,const ProfileDelta & delta2)1695 bool TimeLessThan(const ProfileDelta& delta1, const ProfileDelta& delta2) {
1696 return delta1.time < delta2.time;
1697 }
1698
// Cumulative time-table.
//
// This class implements a propagator for the CumulativeConstraint which is not
// incremental, and where a call to InitialPropagate() takes time which is
// O(n^2) and Omega(n log n) with n the number of cumulative tasks.
//
// Despite the high complexity, this propagator is needed, because of those
// implemented, it is the only one that satisfies that if all variables are
// instantiated, no contradiction will be detected if and only if the
// constraint is satisfied.
//
// The implementation is quite naive, and could certainly be improved, for
// example by maintaining the profile incrementally.
template <class Task>
class CumulativeTimeTable : public Constraint {
 public:
  // Takes ownership of the tasks; they are deleted in the destructor.
  CumulativeTimeTable(Solver* const solver, const std::vector<Task*>& tasks,
                      IntVar* const capacity)
      : Constraint(solver), by_start_min_(tasks), capacity_(capacity) {
    // There may be up to 2 delta's per interval (one on each side),
    // plus two sentinels
    const int profile_max_size = 2 * by_start_min_.size() + 2;
    profile_non_unique_time_.reserve(profile_max_size);
    profile_unique_time_.reserve(profile_max_size);
  }

  ~CumulativeTimeTable() override { gtl::STLDeleteElements(&by_start_min_); }

  // Rebuilds the compulsory-part profile from scratch, then pushes the start
  // min of every non-fixed task against it.
  void InitialPropagate() override {
    BuildProfile();
    PushTasks();
    // TODO(user): When a task has a fixed part, we could propagate
    // max_demand from its current location.
  }

  void Post() override {
    // A single delayed demon re-runs the full (non-incremental) propagation
    // whenever any task or the capacity changes.
    Demon* demon = MakeDelayedConstraintDemon0(
        solver(), this, &CumulativeTimeTable::InitialPropagate,
        "InitialPropagate");
    for (Task* const task : by_start_min_) {
      task->WhenAnything(demon);
    }
    capacity_->WhenRange(demon);
  }

  void Accept(ModelVisitor* const visitor) const override {
    LOG(FATAL) << "Should not be visited";
  }

  std::string DebugString() const override { return "CumulativeTimeTable"; }

 private:
  // Build the usage profile. Runs in O(n log n).
  void BuildProfile() {
    // Build profile with non unique time. Only compulsory parts contribute:
    // a task that must be performed and whose start max is strictly before
    // its end min necessarily occupies [start_max, end_min).
    profile_non_unique_time_.clear();
    for (const Task* const task : by_start_min_) {
      const IntervalVar* const interval = task->interval;
      const int64_t start_max = interval->StartMax();
      const int64_t end_min = interval->EndMin();
      if (interval->MustBePerformed() && start_max < end_min) {
        const int64_t demand_min = task->DemandMin();
        if (demand_min > 0) {
          profile_non_unique_time_.emplace_back(start_max, +demand_min);
          profile_non_unique_time_.emplace_back(end_min, -demand_min);
        }
      }
    }
    // Sort
    std::sort(profile_non_unique_time_.begin(), profile_non_unique_time_.end(),
              TimeLessThan);
    // Build profile with unique times by merging deltas that share a time.
    profile_unique_time_.clear();
    profile_unique_time_.emplace_back(std::numeric_limits<int64_t>::min(), 0);
    int64_t usage = 0;
    for (const ProfileDelta& step : profile_non_unique_time_) {
      if (step.time == profile_unique_time_.back().time) {
        profile_unique_time_.back().delta += step.delta;
      } else {
        profile_unique_time_.push_back(step);
      }
      // Update usage.
      usage += step.delta;
    }
    // Check final usage to be 0: every +demand has a matching -demand.
    DCHECK_EQ(0, usage);
    // Scan to find max usage, which is a lower bound on the capacity.
    int64_t max_usage = 0;
    for (const ProfileDelta& step : profile_unique_time_) {
      usage += step.delta;
      if (usage > max_usage) {
        max_usage = usage;
      }
    }
    DCHECK_EQ(0, usage);
    capacity_->SetMin(max_usage);
    // Add a sentinel so scans in PushTask() always terminate.
    profile_unique_time_.emplace_back(std::numeric_limits<int64_t>::max(), 0);
  }

  // Update the start min for all tasks. Runs in O(n^2) and Omega(n).
  void PushTasks() {
    std::sort(by_start_min_.begin(), by_start_min_.end(),
              StartMinLessThan<Task>);
    int64_t usage = 0;
    int profile_index = 0;
    for (const Task* const task : by_start_min_) {
      const IntervalVar* const interval = task->interval;
      // Fully fixed tasks cannot be pushed.
      if (interval->StartMin() == interval->StartMax() &&
          interval->EndMin() == interval->EndMax()) {
        continue;
      }
      // Advance the profile cursor up to the task's start min; 'usage' is the
      // profile height at profile_unique_time_[profile_index].time.
      // NOTE(review): the DCHECK guards the index before the increment; the
      // max() sentinel appended in BuildProfile() guarantees termination.
      while (interval->StartMin() > profile_unique_time_[profile_index].time) {
        DCHECK(profile_index < profile_unique_time_.size());
        ++profile_index;
        usage += profile_unique_time_[profile_index].delta;
      }
      PushTask(task, profile_index, usage);
    }
  }

  // Push the given task to new_start_min, defined as the smallest integer such
  // that the profile usage for all tasks, excluding the current one, does not
  // exceed capacity_ - task->demand on the interval
  // [new_start_min, new_start_min + task->interval->DurationMin() ).
  void PushTask(const Task* const task, int profile_index, int64_t usage) {
    // Init
    const IntervalVar* const interval = task->interval;
    const int64_t demand_min = task->DemandMin();
    if (demand_min == 0) {  // Demand can be null, nothing to propagate.
      return;
    }
    const int64_t residual_capacity = CapSub(capacity_->Max(), demand_min);
    const int64_t duration = task->interval->DurationMin();
    const ProfileDelta& first_prof_delta = profile_unique_time_[profile_index];

    int64_t new_start_min = interval->StartMin();

    DCHECK_GE(first_prof_delta.time, interval->StartMin());
    // The check above is with a '>='. Let's first treat the '>' case
    if (first_prof_delta.time > interval->StartMin()) {
      // There was no profile delta at a time between interval->StartMin()
      // (included) and the current one.
      // As we don't delete delta's of 0 value, this means the current task
      // does not contribute to the usage before:
      DCHECK((interval->StartMax() >= first_prof_delta.time) ||
             (interval->StartMax() >= interval->EndMin()));
      // The 'usage' given in argument is valid at first_prof_delta.time. To
      // compute the usage at the start min, we need to remove the last delta.
      const int64_t usage_at_start_min = CapSub(usage, first_prof_delta.delta);
      if (usage_at_start_min > residual_capacity) {
        new_start_min = profile_unique_time_[profile_index].time;
      }
    }

    // Influence of current task: its own compulsory part (if any) must be
    // subtracted from the profile while scanning, since the residual capacity
    // already accounts for this task's demand.
    const int64_t start_max = interval->StartMax();
    const int64_t end_min = interval->EndMin();
    ProfileDelta delta_start(start_max, 0);
    ProfileDelta delta_end(end_min, 0);
    if (interval->MustBePerformed() && start_max < end_min) {
      delta_start.delta = +demand_min;
      delta_end.delta = -demand_min;
    }
    // Scan the profile over [new_start_min, new_start_min + duration); each
    // time the remaining usage exceeds the residual capacity, restart the
    // candidate start min at the current profile time.
    while (profile_unique_time_[profile_index].time <
           CapAdd(duration, new_start_min)) {
      const ProfileDelta& profile_delta = profile_unique_time_[profile_index];
      DCHECK(profile_index < profile_unique_time_.size());
      // Compensate for current task
      if (profile_delta.time == delta_start.time) {
        usage -= delta_start.delta;
      }
      if (profile_delta.time == delta_end.time) {
        usage -= delta_end.delta;
      }
      // Increment time
      ++profile_index;
      DCHECK(profile_index < profile_unique_time_.size());
      // Does it fit?
      if (usage > residual_capacity) {
        new_start_min = profile_unique_time_[profile_index].time;
      }
      usage += profile_unique_time_[profile_index].delta;
    }
    task->interval->SetStartMin(new_start_min);
  }

  typedef std::vector<ProfileDelta> Profile;

  // Profile with merged (unique) time points, bracketed by min/max sentinels.
  Profile profile_unique_time_;
  // Raw profile: one +demand and one -demand entry per compulsory part.
  Profile profile_non_unique_time_;
  // Owned tasks, sorted by non-decreasing start min in PushTasks().
  std::vector<Task*> by_start_min_;
  // Capacity of the cumulative resource.
  IntVar* const capacity_;

  DISALLOW_COPY_AND_ASSIGN(CumulativeTimeTable);
};
1894
// Cumulative idempotent Time-Table.
//
// This propagator is based on Letort et al. 2012 and Gay et al. 2015.
//
// TODO(user): fill the description once the incremental aspect are
// implemented.
//
// Worst case: O(n^2 log n) -- really unlikely in practice.
// Best case: Omega(1).
// Practical: Almost linear in the number of unfixed tasks.
//
// Event vocabulary used below:
//   scp = start of compulsory part (at a task's start max),
//   ecp = end of compulsory part (at a task's end min),
//   pr  = pruning event (a task whose start is not fixed becomes relevant).
// gap_ is the remaining free capacity (capacity max minus profile height) at
// the current sweep-line position pos_.
template <class Task>
class TimeTableSync : public Constraint {
 public:
  // Takes ownership of the tasks; they are deleted in the destructor.
  TimeTableSync(Solver* const solver, const std::vector<Task*>& tasks,
                IntVar* const capacity)
      : Constraint(solver), tasks_(tasks), capacity_(capacity) {
    num_tasks_ = tasks_.size();
    gap_ = 0;
    prev_gap_ = 0;
    pos_ = std::numeric_limits<int64_t>::min();
    next_pos_ = std::numeric_limits<int64_t>::min();
    // Allocate vectors to contain no more than n_tasks.
    start_min_.reserve(num_tasks_);
    start_max_.reserve(num_tasks_);
    end_min_.reserve(num_tasks_);
    durations_.reserve(num_tasks_);
    demands_.reserve(num_tasks_);
  }

  ~TimeTableSync() override { gtl::STLDeleteElements(&tasks_); }

  // Runs one full sweep of the time line, rebuilding all events first (the
  // propagator is not incremental yet).
  void InitialPropagate() override {
    // Reset data structures.
    BuildEvents();
    while (!events_scp_.empty() && !events_ecp_.empty()) {
      // Move the sweep line.
      pos_ = NextEventTime();
      // Update the profile with compulsory part events.
      ProcessEventsScp();
      ProcessEventsEcp();
      // Update minimum capacity (may fail)
      capacity_->SetMin(capacity_->Max() - gap_);
      // Time to the next possible profile increase.
      next_pos_ = NextScpTime();
      // Consider new task to schedule.
      ProcessEventsPr();
      // Filter.
      FilterMin();
    }
  }

  void Post() override {
    // A single delayed demon re-runs the full propagation whenever any task
    // or the capacity changes.
    Demon* demon = MakeDelayedConstraintDemon0(
        solver(), this, &TimeTableSync::InitialPropagate, "InitialPropagate");
    for (Task* const task : tasks_) {
      task->WhenAnything(demon);
    }
    capacity_->WhenRange(demon);
  }

  void Accept(ModelVisitor* const visitor) const override {
    LOG(FATAL) << "Should not be visited";
  }

  std::string DebugString() const override { return "TimeTableSync"; }

 private:
  // Task state.
  // NONE: not seen yet; READY: scheduled / nothing left to do;
  // CHECK: currently feasible but must be re-checked before next_pos_;
  // CONFLICT: cannot be scheduled at the current position.
  enum State { NONE, READY, CHECK, CONFLICT };

  // Time of the next start-of-compulsory-part event, or +inf if none.
  inline int64_t NextScpTime() {
    return !events_scp_.empty() ? events_scp_.top().first
                                : std::numeric_limits<int64_t>::max();
  }

  // Earliest time over all pending event queues, or +inf if all are empty.
  inline int64_t NextEventTime() {
    int64_t time = std::numeric_limits<int64_t>::max();
    if (!events_pr_.empty()) {
      time = events_pr_.top().first;
    }
    if (!events_scp_.empty()) {
      int64_t t = events_scp_.top().first;
      time = t < time ? t : time;
    }
    if (!events_ecp_.empty()) {
      int64_t t = events_ecp_.top().first;
      time = t < time ? t : time;
    }
    return time;
  }

  // Consumes all start-of-compulsory-part events at pos_: schedules conflict
  // tasks at pos_ and adds their demand to the profile if a compulsory part
  // now exists.
  void ProcessEventsScp() {
    while (!events_scp_.empty() && events_scp_.top().first == pos_) {
      const int64_t task_id = events_scp_.top().second;
      events_scp_.pop();
      const int64_t old_end_min = end_min_[task_id];
      if (states_[task_id] == State::CONFLICT) {
        // Update cached values.
        const int64_t new_end_min = pos_ + durations_[task_id];
        start_min_[task_id] = pos_;
        end_min_[task_id] = new_end_min;
        // Filter the domain
        tasks_[task_id]->interval->SetStartMin(pos_);
      }
      // The task is scheduled.
      states_[task_id] = State::READY;
      // Update the profile if the task has a compulsory part.
      if (pos_ < end_min_[task_id]) {
        gap_ -= demands_[task_id];
        if (old_end_min <= pos_) {
          events_ecp_.push(kv(end_min_[task_id], task_id));
        }
      }
    }
  }

  // Consumes all end-of-compulsory-part events at pos_, releasing the demand
  // back to the profile; stale events (end min moved later) are re-enqueued.
  void ProcessEventsEcp() {
    while (!events_ecp_.empty() && events_ecp_.top().first == pos_) {
      const int64_t task_id = events_ecp_.top().second;
      events_ecp_.pop();
      // Update the event if it is not up to date.
      if (pos_ < end_min_[task_id]) {
        events_ecp_.push(kv(end_min_[task_id], task_id));
      } else {
        gap_ += demands_[task_id];
      }
    }
  }

  // Consumes all pruning events at pos_, classifying each task as CONFLICT,
  // CHECK, or READY against the current gap.
  void ProcessEventsPr() {
    while (!events_pr_.empty() && events_pr_.top().first == pos_) {
      const int64_t task_id = events_pr_.top().second;
      events_pr_.pop();
      // The task is in conflict with the current profile.
      if (demands_[task_id] > gap_) {
        states_[task_id] = State::CONFLICT;
        conflict_.push(kv(demands_[task_id], task_id));
        continue;
      }
      // The task is not in conflict for the moment.
      if (next_pos_ < end_min_[task_id]) {
        states_[task_id] = State::CHECK;
        check_.push(kv(demands_[task_id], task_id));
        continue;
      }
      // The task is not in conflict and can be scheduled.
      states_[task_id] = State::READY;
    }
  }

  // Re-classifies CHECK/CONFLICT tasks after the profile height changed at
  // pos_, filtering start mins of tasks that leave the CONFLICT state.
  void FilterMin() {
    // The profile exceeds the capacity.
    capacity_->SetMin(capacity_->Max() - gap_);
    // The profile has increased.
    if (gap_ < prev_gap_) {
      // Reconsider the task in check state.
      while (!check_.empty() && demands_[check_.top().second] > gap_) {
        const int64_t task_id = check_.top().second;
        check_.pop();
        if (states_[task_id] == State::CHECK && pos_ < end_min_[task_id]) {
          states_[task_id] = State::CONFLICT;
          conflict_.push(kv(demands_[task_id], task_id));
          continue;
        }
        states_[task_id] = State::READY;
      }
      prev_gap_ = gap_;
    }
    // The profile has decreased.
    if (gap_ > prev_gap_) {
      // Reconsider the tasks in conflict.
      while (!conflict_.empty() && demands_[conflict_.top().second] <= gap_) {
        const int64_t task_id = conflict_.top().second;
        conflict_.pop();
        if (states_[task_id] != State::CONFLICT) {
          continue;
        }
        const int64_t old_end_min = end_min_[task_id];
        // Update the cache.
        start_min_[task_id] = pos_;
        end_min_[task_id] = pos_ + durations_[task_id];
        // Filter the domain.
        tasks_[task_id]->interval->SetStartMin(pos_);  // should not fail.
        // The task still have to be checked.
        if (next_pos_ < end_min_[task_id]) {
          states_[task_id] = State::CHECK;
          check_.push(kv(demands_[task_id], task_id));
        } else {
          states_[task_id] = State::READY;
        }
        // Update possible compulsory part.
        const int64_t start_max = start_max_[task_id];
        if (start_max >= old_end_min && start_max < end_min_[task_id]) {
          events_ecp_.push(kv(end_min_[task_id], task_id));
        }
      }
    }
    prev_gap_ = gap_;
  }

  // Resets the sweep-line state and rebuilds all event queues and per-task
  // caches from the current variable domains.
  void BuildEvents() {
    // Reset the sweep line.
    pos_ = std::numeric_limits<int64_t>::min();
    next_pos_ = std::numeric_limits<int64_t>::min();
    gap_ = capacity_->Max();
    prev_gap_ = capacity_->Max();
    // Reset dynamic states.
    conflict_ = min_heap();
    check_ = max_heap();
    // Reset profile events.
    events_pr_ = min_heap();
    events_scp_ = min_heap();
    events_ecp_ = min_heap();
    // Reset cache.
    start_min_.clear();
    start_max_.clear();
    end_min_.clear();
    durations_.clear();
    demands_.clear();
    states_.clear();
    // Build events.
    for (int i = 0; i < num_tasks_; i++) {
      const int64_t s_min = tasks_[i]->interval->StartMin();
      const int64_t s_max = tasks_[i]->interval->StartMax();
      const int64_t e_min = tasks_[i]->interval->EndMin();
      // Cache the values.
      start_min_.push_back(s_min);
      start_max_.push_back(s_max);
      end_min_.push_back(e_min);
      durations_.push_back(tasks_[i]->interval->DurationMin());
      demands_.push_back(tasks_[i]->DemandMin());
      // Reset task state.
      states_.push_back(State::NONE);
      // Start compulsory part event.
      events_scp_.push(kv(s_max, i));
      // Pruning event only if the start time of the task is not fixed.
      if (s_min != s_max) {
        events_pr_.push(kv(s_min, i));
      }
      // End of compulsory part only if the task has a compulsory part.
      if (s_max < e_min) {
        events_ecp_.push(kv(e_min, i));
      }
    }
  }

  int64_t num_tasks_;
  // Owned tasks sharing the resource.
  std::vector<Task*> tasks_;
  // Capacity of the cumulative resource.
  IntVar* const capacity_;

  // Per-task caches of the domain bounds, refreshed in BuildEvents().
  std::vector<int64_t> start_min_;
  std::vector<int64_t> start_max_;
  std::vector<int64_t> end_min_;
  // NOTE(review): end_max_ appears unused in this class — candidate for
  // removal; confirm no out-of-view use before deleting.
  std::vector<int64_t> end_max_;
  std::vector<int64_t> durations_;
  std::vector<int64_t> demands_;

  // Pair key value.
  typedef std::pair<int64_t, int64_t> kv;
  typedef std::priority_queue<kv, std::vector<kv>, std::greater<kv>> min_heap;
  typedef std::priority_queue<kv, std::vector<kv>, std::less<kv>> max_heap;

  // Profile events, keyed by time.
  min_heap events_pr_;
  min_heap events_scp_;
  min_heap events_ecp_;

  // Task state; conflict_/check_ are keyed by demand.
  std::vector<State> states_;
  min_heap conflict_;
  max_heap check_;

  // Sweep line state.
  int64_t pos_;
  int64_t next_pos_;
  int64_t gap_;
  int64_t prev_gap_;
};
2173
// Cumulative constraint with fixed demands. Does no propagation itself:
// Post() delegates to a set of complementary propagators (time tables,
// edge finders, high-demand sequences, pairwise disjunctions).
class CumulativeConstraint : public Constraint {
 public:
  // NOTE(review): the 'name' parameter appears unused here — confirm whether
  // it was meant to be forwarded to the Constraint base.
  CumulativeConstraint(Solver* const s,
                       const std::vector<IntervalVar*>& intervals,
                       const std::vector<int64_t>& demands,
                       IntVar* const capacity, const std::string& name)
      : Constraint(s),
        capacity_(capacity),
        intervals_(intervals),
        demands_(demands) {
    tasks_.reserve(intervals.size());
    for (int i = 0; i < intervals.size(); ++i) {
      tasks_.push_back(CumulativeTask(intervals[i], demands[i]));
    }
  }

  void Post() override {
    // For the cumulative constraint, there are many propagators, and they
    // don't dominate each other. So the strongest propagation is obtained
    // by posting a bunch of different propagators.
    // Each propagator is posted twice: straight and mirrored (arguments:
    // mirror, edge_finder, tt_sync).
    const ConstraintSolverParameters& params = solver()->parameters();
    if (params.use_cumulative_time_table()) {
      if (params.use_cumulative_time_table_sync()) {
        PostOneSidedConstraint(false, false, true);
        PostOneSidedConstraint(true, false, true);
      } else {
        PostOneSidedConstraint(false, false, false);
        PostOneSidedConstraint(true, false, false);
      }
    }
    if (params.use_cumulative_edge_finder()) {
      PostOneSidedConstraint(false, true, false);
      PostOneSidedConstraint(true, true, false);
    }
    if (params.use_sequence_high_demand_tasks()) {
      PostHighDemandSequenceConstraint();
    }
    if (params.use_all_possible_disjunctions()) {
      PostAllDisjunctions();
    }
  }

  void InitialPropagate() override {
    // Nothing to do: this constraint delegates all the work to other classes
  }

  void Accept(ModelVisitor* const visitor) const override {
    // TODO(user): Build arrays on demand?
    visitor->BeginVisitConstraint(ModelVisitor::kCumulative, this);
    visitor->VisitIntervalArrayArgument(ModelVisitor::kIntervalsArgument,
                                        intervals_);
    visitor->VisitIntegerArrayArgument(ModelVisitor::kDemandsArgument,
                                       demands_);
    visitor->VisitIntegerExpressionArgument(ModelVisitor::kCapacityArgument,
                                            capacity_);
    visitor->EndVisitConstraint(ModelVisitor::kCumulative, this);
  }

  std::string DebugString() const override {
    return absl::StrFormat("CumulativeConstraint([%s], %s)",
                           JoinDebugString(tasks_, ", "),
                           capacity_->DebugString());
  }

 private:
  // Post temporal disjunctions for tasks that cannot overlap because their
  // combined demand exceeds the capacity.
  void PostAllDisjunctions() {
    for (int i = 0; i < intervals_.size(); ++i) {
      IntervalVar* const interval_i = intervals_[i];
      if (interval_i->MayBePerformed()) {
        for (int j = i + 1; j < intervals_.size(); ++j) {
          IntervalVar* const interval_j = intervals_[j];
          if (interval_j->MayBePerformed()) {
            if (CapAdd(tasks_[i].demand, tasks_[j].demand) > capacity_->Max()) {
              Constraint* const constraint =
                  solver()->MakeTemporalDisjunction(interval_i, interval_j);
              solver()->AddConstraint(constraint);
            }
          }
        }
      }
    }
  }

  // Post a Sequence constraint for tasks that requires strictly more than half
  // of the resource
  void PostHighDemandSequenceConstraint() {
    Constraint* constraint = nullptr;
    {  // Need a block to avoid memory leaks in case the AddConstraint fails
      std::vector<IntervalVar*> high_demand_intervals;
      high_demand_intervals.reserve(intervals_.size());
      for (int i = 0; i < demands_.size(); ++i) {
        const int64_t demand = tasks_[i].demand;
        // Consider two tasks with demand d1 and d2 such that
        // d1 * 2 > capacity_ and d2 * 2 > capacity_.
        // Then d1 + d2 = 1/2 (d1 * 2 + d2 * 2)
        //              > 1/2 (capacity_ + capacity_)
        //              > capacity_.
        // Therefore these two tasks cannot overlap.
        // NOTE(review): 'demand * 2' is a plain multiply (no CapProd) and
        // could overflow for demands near int64 max — confirm demand ranges.
        if (demand * 2 > capacity_->Max() &&
            tasks_[i].interval->MayBePerformed()) {
          high_demand_intervals.push_back(tasks_[i].interval);
        }
      }
      if (high_demand_intervals.size() >= 2) {
        // If there are less than 2 such intervals, the constraint would do
        // nothing
        std::string seq_name = absl::StrCat(name(), "-HighDemandSequence");
        constraint = solver()->MakeDisjunctiveConstraint(high_demand_intervals,
                                                         seq_name);
      }
    }
    if (constraint != nullptr) {
      solver()->AddConstraint(constraint);
    }
  }

  // Populate the given vector with useful tasks, meaning the ones on which
  // some propagation can be done: may-be-performed tasks with a positive
  // demand. Tasks whose demand exceeds the capacity are marked unperformed.
  // The produced tasks wrap (possibly mirrored) max-relaxed intervals.
  void PopulateVectorUsefulTasks(
      bool mirror, std::vector<CumulativeTask*>* const useful_tasks) {
    DCHECK(useful_tasks->empty());
    for (int i = 0; i < tasks_.size(); ++i) {
      const CumulativeTask& original_task = tasks_[i];
      IntervalVar* const interval = original_task.interval;
      // Check if exceed capacity
      if (original_task.demand > capacity_->Max()) {
        interval->SetPerformed(false);
      }
      // Add to the useful_task vector if it may be performed and that it
      // actually consumes some of the resource.
      if (interval->MayBePerformed() && original_task.demand > 0) {
        Solver* const s = solver();
        IntervalVar* const original_interval = original_task.interval;
        IntervalVar* const interval =
            mirror ? s->MakeMirrorInterval(original_interval)
                   : original_interval;
        IntervalVar* const relaxed_max = s->MakeIntervalRelaxedMax(interval);
        useful_tasks->push_back(
            new CumulativeTask(relaxed_max, original_task.demand));
      }
    }
  }

  // Makes and return an edge-finder or a time table, or nullptr if it is not
  // necessary.
  Constraint* MakeOneSidedConstraint(bool mirror, bool edge_finder,
                                     bool tt_sync) {
    std::vector<CumulativeTask*> useful_tasks;
    PopulateVectorUsefulTasks(mirror, &useful_tasks);
    if (useful_tasks.empty()) {
      return nullptr;
    } else {
      Solver* const s = solver();
      if (edge_finder) {
        // The edge finder is O(n^2)-ish: skip it above the size threshold.
        const ConstraintSolverParameters& params = solver()->parameters();
        return useful_tasks.size() < params.max_edge_finder_size()
                   ? s->RevAlloc(new EdgeFinder<CumulativeTask>(s, useful_tasks,
                                                                capacity_))
                   : nullptr;
      }
      if (tt_sync) {
        return s->RevAlloc(
            new TimeTableSync<CumulativeTask>(s, useful_tasks, capacity_));
      }
      return s->RevAlloc(
          new CumulativeTimeTable<CumulativeTask>(s, useful_tasks, capacity_));
    }
  }

  // Post a straight or mirrored edge-finder, if needed
  void PostOneSidedConstraint(bool mirror, bool edge_finder, bool tt_sync) {
    Constraint* const constraint =
        MakeOneSidedConstraint(mirror, edge_finder, tt_sync);
    if (constraint != nullptr) {
      solver()->AddConstraint(constraint);
    }
  }

  // Capacity of the cumulative resource
  IntVar* const capacity_;

  // The tasks that share the cumulative resource
  std::vector<CumulativeTask> tasks_;

  // Array of intervals for the visitor.
  const std::vector<IntervalVar*> intervals_;
  // Array of demands for the visitor.
  const std::vector<int64_t> demands_;

  DISALLOW_COPY_AND_ASSIGN(CumulativeConstraint);
};
2366
2367 class VariableDemandCumulativeConstraint : public Constraint {
2368 public:
VariableDemandCumulativeConstraint(Solver * const s,const std::vector<IntervalVar * > & intervals,const std::vector<IntVar * > & demands,IntVar * const capacity,const std::string & name)2369 VariableDemandCumulativeConstraint(Solver* const s,
2370 const std::vector<IntervalVar*>& intervals,
2371 const std::vector<IntVar*>& demands,
2372 IntVar* const capacity,
2373 const std::string& name)
2374 : Constraint(s),
2375 capacity_(capacity),
2376 intervals_(intervals),
2377 demands_(demands) {
2378 tasks_.reserve(intervals.size());
2379 for (int i = 0; i < intervals.size(); ++i) {
2380 tasks_.push_back(VariableCumulativeTask(intervals[i], demands[i]));
2381 }
2382 }
2383
Post()2384 void Post() override {
2385 // For the cumulative constraint, there are many propagators, and they
2386 // don't dominate each other. So the strongest propagation is obtained
2387 // by posting a bunch of different propagators.
2388 const ConstraintSolverParameters& params = solver()->parameters();
2389 if (params.use_cumulative_time_table()) {
2390 PostOneSidedConstraint(false, false, false);
2391 PostOneSidedConstraint(true, false, false);
2392 }
2393 if (params.use_cumulative_edge_finder()) {
2394 PostOneSidedConstraint(false, true, false);
2395 PostOneSidedConstraint(true, true, false);
2396 }
2397 if (params.use_sequence_high_demand_tasks()) {
2398 PostHighDemandSequenceConstraint();
2399 }
2400 if (params.use_all_possible_disjunctions()) {
2401 PostAllDisjunctions();
2402 }
2403 }
2404
InitialPropagate()2405 void InitialPropagate() override {
2406 // Nothing to do: this constraint delegates all the work to other classes
2407 }
2408
Accept(ModelVisitor * const visitor) const2409 void Accept(ModelVisitor* const visitor) const override {
2410 // TODO(user): Build arrays on demand?
2411 visitor->BeginVisitConstraint(ModelVisitor::kCumulative, this);
2412 visitor->VisitIntervalArrayArgument(ModelVisitor::kIntervalsArgument,
2413 intervals_);
2414 visitor->VisitIntegerVariableArrayArgument(ModelVisitor::kDemandsArgument,
2415 demands_);
2416 visitor->VisitIntegerExpressionArgument(ModelVisitor::kCapacityArgument,
2417 capacity_);
2418 visitor->EndVisitConstraint(ModelVisitor::kCumulative, this);
2419 }
2420
DebugString() const2421 std::string DebugString() const override {
2422 return absl::StrFormat("VariableDemandCumulativeConstraint([%s], %s)",
2423 JoinDebugString(tasks_, ", "),
2424 capacity_->DebugString());
2425 }
2426
2427 private:
2428 // Post temporal disjunctions for tasks that cannot overlap.
PostAllDisjunctions()2429 void PostAllDisjunctions() {
2430 for (int i = 0; i < intervals_.size(); ++i) {
2431 IntervalVar* const interval_i = intervals_[i];
2432 if (interval_i->MayBePerformed()) {
2433 for (int j = i + 1; j < intervals_.size(); ++j) {
2434 IntervalVar* const interval_j = intervals_[j];
2435 if (interval_j->MayBePerformed()) {
2436 if (CapAdd(tasks_[i].demand->Min(), tasks_[j].demand->Min()) >
2437 capacity_->Max()) {
2438 Constraint* const constraint =
2439 solver()->MakeTemporalDisjunction(interval_i, interval_j);
2440 solver()->AddConstraint(constraint);
2441 }
2442 }
2443 }
2444 }
2445 }
2446 }
2447
2448 // Post a Sequence constraint for tasks that requires strictly more than half
2449 // of the resource
PostHighDemandSequenceConstraint()2450 void PostHighDemandSequenceConstraint() {
2451 Constraint* constraint = nullptr;
2452 { // Need a block to avoid memory leaks in case the AddConstraint fails
2453 std::vector<IntervalVar*> high_demand_intervals;
2454 high_demand_intervals.reserve(intervals_.size());
2455 for (int i = 0; i < demands_.size(); ++i) {
2456 const int64_t demand = tasks_[i].demand->Min();
2457 // Consider two tasks with demand d1 and d2 such that
2458 // d1 * 2 > capacity_ and d2 * 2 > capacity_.
2459 // Then d1 + d2 = 1/2 (d1 * 2 + d2 * 2)
2460 // > 1/2 (capacity_ + capacity_)
2461 // > capacity_.
2462 // Therefore these two tasks cannot overlap.
2463 if (demand * 2 > capacity_->Max() &&
2464 tasks_[i].interval->MayBePerformed()) {
2465 high_demand_intervals.push_back(tasks_[i].interval);
2466 }
2467 }
2468 if (high_demand_intervals.size() >= 2) {
2469 // If there are less than 2 such intervals, the constraint would do
2470 // nothing
2471 const std::string seq_name =
2472 absl::StrCat(name(), "-HighDemandSequence");
2473 constraint = solver()->MakeStrictDisjunctiveConstraint(
2474 high_demand_intervals, seq_name);
2475 }
2476 }
2477 if (constraint != nullptr) {
2478 solver()->AddConstraint(constraint);
2479 }
2480 }
2481
2482 // Populates the given vector with useful tasks, meaning the ones on which
2483 // some propagation can be done
PopulateVectorUsefulTasks(bool mirror,std::vector<VariableCumulativeTask * > * const useful_tasks)2484 void PopulateVectorUsefulTasks(
2485 bool mirror, std::vector<VariableCumulativeTask*>* const useful_tasks) {
2486 DCHECK(useful_tasks->empty());
2487 for (int i = 0; i < tasks_.size(); ++i) {
2488 const VariableCumulativeTask& original_task = tasks_[i];
2489 IntervalVar* const interval = original_task.interval;
2490 // Check if exceed capacity
2491 if (original_task.demand->Min() > capacity_->Max()) {
2492 interval->SetPerformed(false);
2493 }
2494 // Add to the useful_task vector if it may be performed and that it
2495 // may actually consume some of the resource.
2496 if (interval->MayBePerformed() && original_task.demand->Max() > 0) {
2497 Solver* const s = solver();
2498 IntervalVar* const original_interval = original_task.interval;
2499 IntervalVar* const interval =
2500 mirror ? s->MakeMirrorInterval(original_interval)
2501 : original_interval;
2502 IntervalVar* const relaxed_max = s->MakeIntervalRelaxedMax(interval);
2503 useful_tasks->push_back(
2504 new VariableCumulativeTask(relaxed_max, original_task.demand));
2505 }
2506 }
2507 }
2508
2509 // Makes and returns an edge-finder or a time table, or nullptr if it is not
2510 // necessary.
MakeOneSidedConstraint(bool mirror,bool edge_finder,bool tt_sync)2511 Constraint* MakeOneSidedConstraint(bool mirror, bool edge_finder,
2512 bool tt_sync) {
2513 std::vector<VariableCumulativeTask*> useful_tasks;
2514 PopulateVectorUsefulTasks(mirror, &useful_tasks);
2515 if (useful_tasks.empty()) {
2516 return nullptr;
2517 } else {
2518 Solver* const s = solver();
2519 if (edge_finder) {
2520 return s->RevAlloc(
2521 new EdgeFinder<VariableCumulativeTask>(s, useful_tasks, capacity_));
2522 }
2523 if (tt_sync) {
2524 return s->RevAlloc(new TimeTableSync<VariableCumulativeTask>(
2525 s, useful_tasks, capacity_));
2526 }
2527 return s->RevAlloc(new CumulativeTimeTable<VariableCumulativeTask>(
2528 s, useful_tasks, capacity_));
2529 }
2530 }
2531
2532 // Post a straight or mirrored edge-finder, if needed
PostOneSidedConstraint(bool mirror,bool edge_finder,bool tt_sync)2533 void PostOneSidedConstraint(bool mirror, bool edge_finder, bool tt_sync) {
2534 Constraint* const constraint =
2535 MakeOneSidedConstraint(mirror, edge_finder, tt_sync);
2536 if (constraint != nullptr) {
2537 solver()->AddConstraint(constraint);
2538 }
2539 }
2540
  // Capacity of the cumulative resource (a variable: it may still be
  // reduced by propagation).
  IntVar* const capacity_;

  // The tasks that share the cumulative resource, one per
  // (interval, demand) pair given at construction.
  std::vector<VariableCumulativeTask> tasks_;

  // Array of intervals, kept for the model visitor.
  const std::vector<IntervalVar*> intervals_;
  // Array of demands, kept for the model visitor.
  const std::vector<IntVar*> demands_;

  DISALLOW_COPY_AND_ASSIGN(VariableDemandCumulativeConstraint);
};
2554 } // namespace
2555
2556 // Sequence Constraint
2557
2558 // ----- Public class -----
2559
DisjunctiveConstraint(Solver * const s,const std::vector<IntervalVar * > & intervals,const std::string & name)2560 DisjunctiveConstraint::DisjunctiveConstraint(
2561 Solver* const s, const std::vector<IntervalVar*>& intervals,
2562 const std::string& name)
2563 : Constraint(s), intervals_(intervals) {
2564 if (!name.empty()) {
2565 set_name(name);
2566 }
2567 transition_time_ = [](int64_t x, int64_t y) { return 0; };
2568 }
2569
~DisjunctiveConstraint()2570 DisjunctiveConstraint::~DisjunctiveConstraint() {}
2571
SetTransitionTime(std::function<int64_t (int64_t,int64_t)> transition_time)2572 void DisjunctiveConstraint::SetTransitionTime(
2573 std::function<int64_t(int64_t, int64_t)> transition_time) {
2574 if (transition_time != nullptr) {
2575 transition_time_ = transition_time;
2576 } else {
2577 transition_time_ = [](int64_t x, int64_t y) { return 0; };
2578 }
2579 }
2580
2581 // ---------- Factory methods ----------
2582
MakeDisjunctiveConstraint(const std::vector<IntervalVar * > & intervals,const std::string & name)2583 DisjunctiveConstraint* Solver::MakeDisjunctiveConstraint(
2584 const std::vector<IntervalVar*>& intervals, const std::string& name) {
2585 return RevAlloc(new FullDisjunctiveConstraint(this, intervals, name, false));
2586 }
2587
MakeStrictDisjunctiveConstraint(const std::vector<IntervalVar * > & intervals,const std::string & name)2588 DisjunctiveConstraint* Solver::MakeStrictDisjunctiveConstraint(
2589 const std::vector<IntervalVar*>& intervals, const std::string& name) {
2590 return RevAlloc(new FullDisjunctiveConstraint(this, intervals, name, true));
2591 }
2592
2593 // Demands are constant
2594
MakeCumulative(const std::vector<IntervalVar * > & intervals,const std::vector<int64_t> & demands,int64_t capacity,const std::string & name)2595 Constraint* Solver::MakeCumulative(const std::vector<IntervalVar*>& intervals,
2596 const std::vector<int64_t>& demands,
2597 int64_t capacity, const std::string& name) {
2598 CHECK_EQ(intervals.size(), demands.size());
2599 for (int i = 0; i < intervals.size(); ++i) {
2600 CHECK_GE(demands[i], 0);
2601 }
2602 if (capacity == 1 && AreAllOnes(demands)) {
2603 return MakeDisjunctiveConstraint(intervals, name);
2604 }
2605 return RevAlloc(new CumulativeConstraint(this, intervals, demands,
2606 MakeIntConst(capacity), name));
2607 }
2608
MakeCumulative(const std::vector<IntervalVar * > & intervals,const std::vector<int> & demands,int64_t capacity,const std::string & name)2609 Constraint* Solver::MakeCumulative(const std::vector<IntervalVar*>& intervals,
2610 const std::vector<int>& demands,
2611 int64_t capacity, const std::string& name) {
2612 return MakeCumulative(intervals, ToInt64Vector(demands), capacity, name);
2613 }
2614
MakeCumulative(const std::vector<IntervalVar * > & intervals,const std::vector<int64_t> & demands,IntVar * const capacity,const std::string & name)2615 Constraint* Solver::MakeCumulative(const std::vector<IntervalVar*>& intervals,
2616 const std::vector<int64_t>& demands,
2617 IntVar* const capacity,
2618 const std::string& name) {
2619 CHECK_EQ(intervals.size(), demands.size());
2620 for (int i = 0; i < intervals.size(); ++i) {
2621 CHECK_GE(demands[i], 0);
2622 }
2623 return RevAlloc(
2624 new CumulativeConstraint(this, intervals, demands, capacity, name));
2625 }
2626
MakeCumulative(const std::vector<IntervalVar * > & intervals,const std::vector<int> & demands,IntVar * const capacity,const std::string & name)2627 Constraint* Solver::MakeCumulative(const std::vector<IntervalVar*>& intervals,
2628 const std::vector<int>& demands,
2629 IntVar* const capacity,
2630 const std::string& name) {
2631 return MakeCumulative(intervals, ToInt64Vector(demands), capacity, name);
2632 }
2633
2634 // Demands are variable
2635
MakeCumulative(const std::vector<IntervalVar * > & intervals,const std::vector<IntVar * > & demands,int64_t capacity,const std::string & name)2636 Constraint* Solver::MakeCumulative(const std::vector<IntervalVar*>& intervals,
2637 const std::vector<IntVar*>& demands,
2638 int64_t capacity, const std::string& name) {
2639 CHECK_EQ(intervals.size(), demands.size());
2640 for (int i = 0; i < intervals.size(); ++i) {
2641 CHECK_GE(demands[i]->Min(), 0);
2642 }
2643 if (AreAllBound(demands)) {
2644 std::vector<int64_t> fixed_demands(demands.size());
2645 for (int i = 0; i < demands.size(); ++i) {
2646 fixed_demands[i] = demands[i]->Value();
2647 }
2648 return MakeCumulative(intervals, fixed_demands, capacity, name);
2649 }
2650 return RevAlloc(new VariableDemandCumulativeConstraint(
2651 this, intervals, demands, MakeIntConst(capacity), name));
2652 }
2653
MakeCumulative(const std::vector<IntervalVar * > & intervals,const std::vector<IntVar * > & demands,IntVar * const capacity,const std::string & name)2654 Constraint* Solver::MakeCumulative(const std::vector<IntervalVar*>& intervals,
2655 const std::vector<IntVar*>& demands,
2656 IntVar* const capacity,
2657 const std::string& name) {
2658 CHECK_EQ(intervals.size(), demands.size());
2659 for (int i = 0; i < intervals.size(); ++i) {
2660 CHECK_GE(demands[i]->Min(), 0);
2661 }
2662 if (AreAllBound(demands)) {
2663 std::vector<int64_t> fixed_demands(demands.size());
2664 for (int i = 0; i < demands.size(); ++i) {
2665 fixed_demands[i] = demands[i]->Value();
2666 }
2667 return MakeCumulative(intervals, fixed_demands, capacity, name);
2668 }
2669 return RevAlloc(new VariableDemandCumulativeConstraint(
2670 this, intervals, demands, capacity, name));
2671 }
2672 } // namespace operations_research
2673