#include "UndoRedo.hpp"

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdio>
#include <fstream>
#include <iostream>
#include <memory>
#include <typeinfo>

#include <cereal/types/polymorphic.hpp>
#include <cereal/types/map.hpp>
#include <cereal/types/string.hpp>
#include <cereal/types/utility.hpp>
#include <cereal/types/vector.hpp>
#include <cereal/archives/binary.hpp>
#define CEREAL_FUTURE_EXPERIMENTAL
#include <cereal/archives/adapters.hpp>

#include <libslic3r/PrintConfig.hpp>
#include <libslic3r/ObjectID.hpp>
#include <libslic3r/Utils.hpp>

#include <boost/foreach.hpp>
25
26 #ifndef NDEBUG
27 // #define SLIC3R_UNDOREDO_DEBUG
28 #endif /* NDEBUG */
29 #if 0
30 // Stop at a fraction of the normal Undo / Redo stack size.
31 #define UNDO_REDO_DEBUG_LOW_MEM_FACTOR 10000
32 #else
33 #define UNDO_REDO_DEBUG_LOW_MEM_FACTOR 1
34 #endif
35
36 namespace Slic3r {
37 namespace UndoRedo {
38
39 #ifdef SLIC3R_UNDOREDO_DEBUG
// Format a pointer value as a human readable string (debug builds only).
static inline std::string ptr_to_string(const void* ptr)
{
	char buf[64];
	// snprintf bounds the write to the buffer, unlike the former sprintf.
	snprintf(buf, sizeof(buf), "%p", ptr);
	return buf;
}
46 #endif
47
// Initialize snapshot metadata to a benign default state: unknown printer technology,
// no flags set and no layer range selected (-1 meaning "none").
SnapshotData::SnapshotData() : printer_technology(ptUnknown), flags(0), layer_range_idx(-1)
{
}
51
// Reserved name of the virtual "topmost" snapshot, which stands for the current
// application state that has not been captured onto the stack yet.
static std::string topmost_snapshot_name = "@@@ Topmost @@@";

// A snapshot is the topmost one if it carries the reserved topmost name.
bool Snapshot::is_topmost() const
{
	return this->name == topmost_snapshot_name;
}
58
59 // Time interval, start is closed, end is open.
// Time interval on the undo/redo timeline; the start is closed, the end is open: [begin, end).
struct Interval
{
public:
	Interval(size_t begin, size_t end) : m_begin(begin), m_end(end) {}

	size_t begin() const { return m_begin; }
	size_t end()   const { return m_end; }

	// An interval is valid if it is non-empty.
	// (m_begin is unsigned, so the former "m_begin >= 0" test was a tautology and is dropped.)
	bool is_valid() const { return m_begin < m_end; }
	// This interval comes strictly before the rhs interval.
	bool strictly_before(const Interval &rhs) const { return this->is_valid() && rhs.is_valid() && m_end <= rhs.m_begin; }
	// This interval comes strictly after the rhs interval.
	bool strictly_after(const Interval &rhs) const { return this->is_valid() && rhs.is_valid() && rhs.m_end <= m_begin; }

	// Lexicographic ordering by (begin, end); used as a key for std::lower_bound.
	bool operator<(const Interval &rhs) const { return (m_begin < rhs.m_begin) || (m_begin == rhs.m_begin && m_end < rhs.m_end); }
	bool operator==(const Interval &rhs) const { return m_begin == rhs.m_begin && m_end == rhs.m_end; }

	// Clip the interval from the left / right; extend_end may only grow the interval.
	void trim_begin(size_t new_begin) { m_begin = std::max(m_begin, new_begin); }
	void trim_end(size_t new_end)     { m_end = std::min(m_end, new_end); }
	void extend_end(size_t new_end)   { assert(new_end >= m_end); m_end = new_end; }

	// Estimated size of this object in memory, to be used to drop least recently used snapshots.
	// Fixed: was sizeof(this) (the size of a pointer); sizeof(*this) is the actual object size.
	size_t memsize() const { return sizeof(*this); }

private:
	size_t m_begin;
	size_t m_end;
};
87
88 // History of a single object tracked by the Undo / Redo stack. The object may be mutable or immutable.
// History of a single object tracked by the Undo / Redo stack. The object may be mutable or immutable.
// Abstract interface; concrete histories are MutableObjectHistory / ImmutableObjectHistory.
class ObjectHistoryBase
{
public:
	// Polymorphic base deleted through base-class pointers, thus the destructor is virtual
	// (defaulted rather than an empty body).
	virtual ~ObjectHistoryBase() = default;

	// Is the object captured by this history mutable or immutable?
	virtual bool is_mutable() const = 0;
	virtual bool is_immutable() const = 0;
	// The object is optional, it may be released if the Undo / Redo stack memory grows over the limits.
	virtual bool is_optional() const { return false; }
	// If it is an immutable object, return its pointer. There is a map assigning a temporary ObjectID to the immutable object pointer.
	virtual const void* immutable_object_ptr() const { return nullptr; }

	// If the history is empty, the ObjectHistory object could be released.
	virtual bool empty() = 0;

	// Release all data before the given timestamp. For the ImmutableObjectHistory, the shared pointer is NOT released.
	// Return the amount of memory released.
	virtual size_t release_before_timestamp(size_t timestamp) = 0;
	// Release all data after the given timestamp. For the ImmutableObjectHistory, the shared pointer is NOT released.
	// Return the amount of memory released.
	virtual size_t release_after_timestamp(size_t timestamp) = 0;
	// Release all optional data of this history.
	virtual size_t release_optional() = 0;
	// Restore optional data possibly released by release_optional.
	virtual void restore_optional() = 0;

	// Estimated size in memory, to be used to drop least recently used snapshots.
	virtual size_t memsize() const = 0;

#ifdef SLIC3R_UNDOREDO_DEBUG
	// Human readable debug information.
	virtual std::string format() = 0;
#endif /* SLIC3R_UNDOREDO_DEBUG */

#ifndef NDEBUG
	virtual bool valid() = 0;
#endif /* NDEBUG */
};
128
// Common implementation of a single object's history: a sorted vector of non-overlapping
// time intervals. T is either Interval (immutable objects) or MutableHistoryInterval.
template<typename T> class ObjectHistory : public ObjectHistoryBase
{
public:
	~ObjectHistory() override {}

	// If the history is empty, the ObjectHistory object could be released.
	bool empty() override { return m_history.empty(); }

	// Release all data before the given timestamp. For the ImmutableObjectHistory, the shared pointer is NOT released.
	// Returns the estimated amount of memory released.
	size_t release_before_timestamp(size_t timestamp) override {
		size_t mem_released = 0;
		if (! m_history.empty()) {
			assert(this->valid());
			// it points to an interval which either starts with timestamp, or follows the timestamp.
			auto it = std::lower_bound(m_history.begin(), m_history.end(), T(timestamp, timestamp));
			// Find the first iterator with begin() < timestamp.
			if (it == m_history.end())
				-- it;
			while (it != m_history.begin() && it->begin() >= timestamp)
				-- it;
			// If this interval straddles the timestamp, cut off its head and step back to the
			// last interval that ends at or before the timestamp (if any).
			if (it->begin() < timestamp && it->end() > timestamp) {
				it->trim_begin(timestamp);
				if (it != m_history.begin())
					-- it;
			}
			// Erase every interval ending at or before the timestamp, accumulating the released memory.
			if (it->end() <= timestamp) {
				auto it_end = ++ it;
				for (it = m_history.begin(); it != it_end; ++ it)
					mem_released += it->memsize();
				m_history.erase(m_history.begin(), it_end);
			}
			assert(this->valid());
		}
		return mem_released;
	}

	// Release all data after the given timestamp. The shared pointer is NOT released.
	// Returns the estimated amount of memory released.
	size_t release_after_timestamp(size_t timestamp) override {
		size_t mem_released = 0;
		if (! m_history.empty()) {
			assert(this->valid());
			// it points to an interval which either starts with timestamp, or follows the timestamp.
			auto it = std::lower_bound(m_history.begin(), m_history.end(), T(timestamp, timestamp));
			if (it != m_history.begin()) {
				auto it_prev = it;
				-- it_prev;
				assert(it_prev->begin() < timestamp);
				// Trim the last interval with timestamp.
				it_prev->trim_end(timestamp);
			}
			// Erase every interval starting at or after the timestamp, accumulating the released memory.
			for (auto it2 = it; it2 != m_history.end(); ++ it2)
				mem_released += it2->memsize();
			m_history.erase(it, m_history.end());
			assert(this->valid());
		}
		return mem_released;
	}

protected:
	// Sorted, non-overlapping intervals (see valid() of the derived classes).
	std::vector<T> m_history;
};
190
191 // Big objects (mainly the triangle meshes) are tracked by Slicer using the shared pointers
192 // and they are immutable.
193 // The Undo / Redo stack therefore may keep a shared pointer to these immutable objects
194 // and as long as the ref counter of these objects is higher than 1 (1 reference is held
195 // by the Undo / Redo stack), there is no cost associated to holding the object
196 // at the Undo / Redo stack. Once the reference counter drops to 1 (only the Undo / Redo
197 // stack holds the reference), the shared pointer may get serialized (and possibly compressed)
198 // and the shared pointer may be released.
199 // The history of a single immutable object may not be continuous, as an immutable object may
200 // be removed from the scene while being kept at the Copy / Paste stack.
201 template<typename T>
202 class ImmutableObjectHistory : public ObjectHistory<Interval>
203 {
204 public:
ImmutableObjectHistory(std::shared_ptr<const T> shared_object,bool optional)205 ImmutableObjectHistory(std::shared_ptr<const T> shared_object, bool optional) : m_shared_object(shared_object), m_optional(optional) {}
~ImmutableObjectHistory()206 ~ImmutableObjectHistory() override {}
207
is_mutable() const208 bool is_mutable() const override { return false; }
is_immutable() const209 bool is_immutable() const override { return true; }
is_optional() const210 bool is_optional() const override { return m_optional; }
211 // If it is an immutable object, return its pointer. There is a map assigning a temporary ObjectID to the immutable object pointer.
immutable_object_ptr() const212 const void* immutable_object_ptr() const { return (const void*)m_shared_object.get(); }
213
214 // Estimated size in memory, to be used to drop least recently used snapshots.
memsize() const215 size_t memsize() const override {
216 size_t memsize = sizeof(*this);
217 if (this->is_serialized())
218 memsize += m_serialized.size();
219 else if (m_shared_object.use_count() == 1)
220 // Only count the shared object's memsize into the total Undo / Redo stack memsize if it is referenced from the Undo / Redo stack only.
221 memsize += m_shared_object->memsize();
222 memsize += m_history.size() * sizeof(Interval);
223 return memsize;
224 }
225
save(size_t active_snapshot_time,size_t current_time)226 void save(size_t active_snapshot_time, size_t current_time) {
227 assert(m_history.empty() || m_history.back().end() <= active_snapshot_time ||
228 // The snapshot of an immutable object may have already been taken from another mutable object.
229 (m_history.back().begin() <= active_snapshot_time && m_history.back().end() == current_time + 1));
230 if (m_history.empty() || m_history.back().end() < active_snapshot_time)
231 m_history.emplace_back(active_snapshot_time, current_time + 1);
232 else
233 m_history.back().extend_end(current_time + 1);
234 }
235
has_snapshot(size_t timestamp)236 bool has_snapshot(size_t timestamp) {
237 if (m_history.empty())
238 return false;
239 auto it = std::lower_bound(m_history.begin(), m_history.end(), Interval(timestamp, timestamp));
240 if (it == m_history.end() || it->begin() > timestamp) {
241 if (it == m_history.begin())
242 return false;
243 -- it;
244 }
245 return timestamp >= it->begin() && timestamp < it->end();
246 }
247
248 // Release all optional data of this history.
release_optional()249 size_t release_optional() override {
250 size_t mem_released = 0;
251 if (m_optional) {
252 bool released = false;
253 if (this->is_serialized()) {
254 mem_released += m_serialized.size();
255 m_serialized.clear();
256 released = true;
257 } else if (m_shared_object.use_count() == 1) {
258 mem_released += m_shared_object->memsize();
259 m_shared_object.reset();
260 released = true;
261 }
262 if (released) {
263 mem_released += m_history.size() * sizeof(Interval);
264 m_history.clear();
265 }
266 } else if (m_shared_object.use_count() == 1) {
267 // The object is in memory, but it is not shared with the scene. Let the object decide whether there is any optional data to release.
268 const_cast<T*>(m_shared_object.get())->release_optional();
269 }
270 return mem_released;
271 }
272
273 // Restore optional data possibly released by this->release_optional().
restore_optional()274 void restore_optional() override {
275 if (m_shared_object.use_count() == 1)
276 const_cast<T*>(m_shared_object.get())->restore_optional();
277 }
278
is_serialized() const279 bool is_serialized() const { return m_shared_object.get() == nullptr; }
serialized_data() const280 const std::string& serialized_data() const { return m_serialized; }
281 std::shared_ptr<const T>& shared_ptr(StackImpl &stack);
282
283 #ifdef SLIC3R_UNDOREDO_DEBUG
format()284 std::string format() override {
285 std::string out = typeid(T).name();
286 out += this->is_serialized() ?
287 std::string(" len:") + std::to_string(m_serialized.size()) :
288 std::string(" shared_ptr:") + ptr_to_string(m_shared_object.get());
289 for (const Interval &interval : m_history)
290 out += std::string(", <") + std::to_string(interval.begin()) + "," + std::to_string(interval.end()) + ")";
291 return out;
292 }
293 #endif /* SLIC3R_UNDOREDO_DEBUG */
294
295 #ifndef NDEBUG
296 bool valid() override;
297 #endif /* NDEBUG */
298
299 private:
300 // Either the source object is held by a shared pointer and the m_serialized field is empty,
301 // or the shared pointer is null and the object is being serialized into m_serialized.
302 std::shared_ptr<const T> m_shared_object;
303 // If this object is optional, then it may be deleted from the Undo / Redo stack and recalculated from other data (for example mesh convex hull).
304 bool m_optional;
305 std::string m_serialized;
306 };
307
308 struct MutableHistoryInterval
309 {
310 private:
311 struct Data
312 {
313 // Reference counter of this data chunk. We may have used shared_ptr, but the shared_ptr is thread safe
314 // with the associated cost of CPU cache invalidation on refcount change.
315 size_t refcnt;
316 size_t size;
317 char data[1];
318
319 // The serialized data matches the data stored here.
matchesSlic3r::UndoRedo::MutableHistoryInterval::Data320 bool matches(const std::string& rhs) { return this->size == rhs.size() && memcmp(this->data, rhs.data(), this->size) == 0; }
321
322 // The timestamp matches the timestamp serialized in the data stored here.
matches_timestampSlic3r::UndoRedo::MutableHistoryInterval::Data323 bool matches_timestamp(uint64_t timestamp) { assert(timestamp > 0); assert(this->size > 8); return memcmp(this->data, ×tamp, 8) == 0; }
324 };
325
326 Interval m_interval;
327 Data *m_data;
328
329 public:
MutableHistoryIntervalSlic3r::UndoRedo::MutableHistoryInterval330 MutableHistoryInterval(const Interval &interval, const std::string &input_data) : m_interval(interval), m_data(nullptr) {
331 m_data = (Data*)new char[offsetof(Data, data) + input_data.size()];
332 m_data->refcnt = 1;
333 m_data->size = input_data.size();
334 memcpy(m_data->data, input_data.data(), input_data.size());
335 }
336
MutableHistoryIntervalSlic3r::UndoRedo::MutableHistoryInterval337 MutableHistoryInterval(const Interval &interval, MutableHistoryInterval &other) : m_interval(interval), m_data(other.m_data) {
338 ++ m_data->refcnt;
339 }
340
341 // as a key for std::lower_bound
MutableHistoryIntervalSlic3r::UndoRedo::MutableHistoryInterval342 MutableHistoryInterval(const size_t begin, const size_t end) : m_interval(begin, end), m_data(nullptr) {}
343
MutableHistoryIntervalSlic3r::UndoRedo::MutableHistoryInterval344 MutableHistoryInterval(MutableHistoryInterval&& rhs) : m_interval(rhs.m_interval), m_data(rhs.m_data) { rhs.m_data = nullptr; }
operator =Slic3r::UndoRedo::MutableHistoryInterval345 MutableHistoryInterval& operator=(MutableHistoryInterval&& rhs) { m_interval = rhs.m_interval; m_data = rhs.m_data; rhs.m_data = nullptr; return *this; }
346
~MutableHistoryIntervalSlic3r::UndoRedo::MutableHistoryInterval347 ~MutableHistoryInterval() {
348 if (m_data != nullptr && -- m_data->refcnt == 0)
349 delete[] (char*)m_data;
350 }
351
intervalSlic3r::UndoRedo::MutableHistoryInterval352 const Interval& interval() const { return m_interval; }
beginSlic3r::UndoRedo::MutableHistoryInterval353 size_t begin() const { return m_interval.begin(); }
endSlic3r::UndoRedo::MutableHistoryInterval354 size_t end() const { return m_interval.end(); }
trim_beginSlic3r::UndoRedo::MutableHistoryInterval355 void trim_begin(size_t timestamp) { m_interval.trim_begin(timestamp); }
trim_endSlic3r::UndoRedo::MutableHistoryInterval356 void trim_end (size_t timestamp) { m_interval.trim_end(timestamp); }
extend_endSlic3r::UndoRedo::MutableHistoryInterval357 void extend_end(size_t timestamp) { m_interval.extend_end(timestamp); }
358
operator <Slic3r::UndoRedo::MutableHistoryInterval359 bool operator<(const MutableHistoryInterval& rhs) const { return m_interval < rhs.m_interval; }
operator ==Slic3r::UndoRedo::MutableHistoryInterval360 bool operator==(const MutableHistoryInterval& rhs) const { return m_interval == rhs.m_interval; }
361
dataSlic3r::UndoRedo::MutableHistoryInterval362 const char* data() const { return m_data->data; }
sizeSlic3r::UndoRedo::MutableHistoryInterval363 size_t size() const { return m_data->size; }
refcntSlic3r::UndoRedo::MutableHistoryInterval364 size_t refcnt() const { return m_data->refcnt; }
matchesSlic3r::UndoRedo::MutableHistoryInterval365 bool matches(const std::string& data) { return m_data->matches(data); }
matches_timestampSlic3r::UndoRedo::MutableHistoryInterval366 bool matches_timestamp(uint64_t timestamp) { return m_data->matches_timestamp(timestamp); }
memsizeSlic3r::UndoRedo::MutableHistoryInterval367 size_t memsize() const {
368 return m_data->refcnt == 1 ?
369 // Count just the size of the snapshot data.
370 m_data->size :
371 // Count the size of the snapshot data divided by the number of references, rounded up.
372 (m_data->size + m_data->refcnt - 1) / m_data->refcnt;
373 }
374
375 private:
376 MutableHistoryInterval(const MutableHistoryInterval &rhs);
377 MutableHistoryInterval& operator=(const MutableHistoryInterval &rhs);
378 };
379
380 // Smaller objects (Model, ModelObject, ModelInstance, ModelVolume, DynamicPrintConfig)
381 // are mutable and there is not tracking of the changes, therefore a snapshot needs to be
382 // taken every time and compared to the previous data at the Undo / Redo stack.
383 // The serialized data is stored if it is different from the last value on the stack, otherwise
384 // the serialized data is discarded.
385 // The history of a single mutable object may not be continuous, as an mutable object may
386 // be removed from the scene while being kept at the Copy / Paste stack, therefore an object snapshot
387 // with the same serialized object data may be shared by multiple history intervals.
template<typename T>
class MutableObjectHistory : public ObjectHistory<MutableHistoryInterval>
{
public:
	~MutableObjectHistory() override {}

	bool is_mutable() const override { return true; }
	bool is_immutable() const override { return false; }

	// Estimated size in memory, to be used to drop least recently used snapshots.
	size_t memsize() const override {
		size_t memsize = sizeof(*this);
		memsize += m_history.size() * sizeof(MutableHistoryInterval);
		for (const MutableHistoryInterval &interval : m_history)
			memsize += interval.memsize();
		return memsize;
	}

	// If an object provides a reliable timestamp and the object serializes the timestamp first,
	// then we may just check the validity of the timestamp against the last snapshot without
	// having to serialize the whole object. This reduces the amount of serialization and memcmp
	// when taking a snapshot.
	// Returns true if the last snapshot could be reused; false means the caller must serialize
	// the object and call save() with the data.
	bool try_save_timestamp(size_t active_snapshot_time, size_t current_time, uint64_t timestamp) {
		assert(m_history.empty() || m_history.back().end() <= active_snapshot_time);
		if (! m_history.empty() && m_history.back().matches_timestamp(timestamp)) {
			if (m_history.back().end() < active_snapshot_time)
				// Share the previous data by reference counting.
				// NOTE(review): emplace_back takes a reference to m_history.back(); this appears to
				// rely on the element being constructed before the old storage is released — confirm.
				m_history.emplace_back(Interval(current_time, current_time + 1), m_history.back());
			else {
				assert(m_history.back().end() == active_snapshot_time);
				// Just extend the last interval using the old data.
				m_history.back().extend_end(current_time + 1);
			}
			return true;
		}
		// The timestamp is not valid, the caller has to call this->save() with the serialized data.
		return false;
	}

	// Store the serialized state of the object for the interval ending at current_time + 1,
	// sharing or extending the last interval when the data did not change.
	void save(size_t active_snapshot_time, size_t current_time, const std::string &data) {
		assert(m_history.empty() || m_history.back().end() <= active_snapshot_time);
		if (m_history.empty() || m_history.back().end() < active_snapshot_time) {
			if (! m_history.empty() && m_history.back().matches(data))
				// Share the previous data by reference counting.
				m_history.emplace_back(Interval(current_time, current_time + 1), m_history.back());
			else
				// Allocate new data.
				m_history.emplace_back(Interval(current_time, current_time + 1), data);
		} else {
			assert(! m_history.empty());
			assert(m_history.back().end() == active_snapshot_time);
			if (m_history.back().matches(data))
				// Just extend the last interval using the old data.
				m_history.back().extend_end(current_time + 1);
			else
				// Allocate new data time continuous with the previous data.
				m_history.emplace_back(Interval(active_snapshot_time, current_time + 1), data);
		}
	}

	// Return a copy of the serialized state valid at the given timestamp.
	// Precondition: some interval of this history contains the timestamp.
	std::string load(size_t timestamp) const {
		assert(! m_history.empty());
		auto it = std::lower_bound(m_history.begin(), m_history.end(), MutableHistoryInterval(timestamp, timestamp));
		if (it == m_history.end() || it->begin() > timestamp) {
			assert(it != m_history.begin());
			-- it;
		}
		assert(timestamp >= it->begin() && timestamp < it->end());
		return std::string(it->data(), it->data() + it->size());
	}

	// Currently all mutable snapshots are mandatory.
	size_t release_optional() override { return 0; }
	// Currently there is no way to release optional data from the mutable objects.
	void restore_optional() override {}

#ifdef SLIC3R_UNDOREDO_DEBUG
	std::string format() override {
		std::string out = typeid(T).name();
		for (const MutableHistoryInterval &interval : m_history)
			out += std::string(", ptr:") + ptr_to_string(interval.data()) + " len:" + std::to_string(interval.size()) + " <" + std::to_string(interval.begin()) + "," + std::to_string(interval.end()) + ")";
		return out;
	}
#endif /* SLIC3R_UNDOREDO_DEBUG */

#ifndef NDEBUG
	bool valid() override;
#endif /* NDEBUG */
};
477
478 #ifndef NDEBUG
// Debug-only consistency check of an immutable object's history.
template<typename T>
bool ImmutableObjectHistory<T>::valid()
{
	// The immutable object content is captured either by a shared object, or by its serialization, but not both.
	assert(! m_shared_object == ! m_serialized.empty());
	// Verify that the history intervals are sorted and do not overlap.
	if (! m_history.empty())
		for (size_t i = 1; i < m_history.size(); ++ i)
			assert(m_history[i - 1].strictly_before(m_history[i]));
	return true;
}
490 #endif /* NDEBUG */
491
492 #ifndef NDEBUG
// Debug-only consistency check of a mutable object's history.
template<typename T>
bool MutableObjectHistory<T>::valid()
{
	// Verify that the history intervals are sorted and do not overlap, and that the data reference counters are correct.
	if (! m_history.empty()) {
		// Count how many intervals reference each data blob, then compare against the stored refcnt.
		std::map<const char*, size_t> refcntrs;
		assert(m_history.front().data() != nullptr);
		++ refcntrs[m_history.front().data()];
		for (size_t i = 1; i < m_history.size(); ++ i) {
			assert(m_history[i - 1].interval().strictly_before(m_history[i].interval()));
			++ refcntrs[m_history[i].data()];
		}
		for (const auto &hi : m_history) {
			assert(hi.data() != nullptr);
			assert(refcntrs[hi.data()] == hi.refcnt());
		}
	}
	return true;
}
512 #endif /* NDEBUG */
513
// Private implementation of the Undo / Redo stack: the object histories, the snapshot list
// and the timeline counters.
class StackImpl
{
public:
	// Stack needs to be initialized. An empty stack is not valid, there must be a "New Project" status stored at the beginning.
	// Initially enable Undo / Redo stack to occupy maximum 10% of the total system physical memory.
	StackImpl() : m_memory_limit(std::min(Slic3r::total_physical_memory() / 10, size_t(1 * 16384 * 65536 / UNDO_REDO_DEBUG_LOW_MEM_FACTOR))), m_active_snapshot_time(0), m_current_time(0) {}

	// Reset the stack to its initial, empty state.
	void clear() {
		m_objects.clear();
		m_shared_ptr_to_object_id.clear();
		m_snapshots.clear();
		m_active_snapshot_time = 0;
		m_current_time = 0;
		m_selection.clear();
	}

	bool empty() const {
		assert(m_objects.empty() == m_snapshots.empty());
		assert(! m_objects.empty() || (m_current_time == 0 && m_active_snapshot_time == 0));
		return m_snapshots.empty();
	}

	void set_memory_limit(size_t memsize) { m_memory_limit = memsize; }
	size_t get_memory_limit() const { return m_memory_limit; }

	// Estimated memory occupied by all tracked object histories.
	size_t memsize() const {
		size_t memsize = 0;
		for (const auto &object : m_objects)
			memsize += object.second->memsize();
		return memsize;
	}

	// Store the current application state onto the Undo / Redo stack, remove all snapshots after m_active_snapshot_time.
	void take_snapshot(const std::string& snapshot_name, const Slic3r::Model& model, const Slic3r::GUI::Selection& selection, const Slic3r::GUI::GLGizmosManager& gizmos, const SnapshotData &snapshot_data);
	void load_snapshot(size_t timestamp, Slic3r::Model& model, Slic3r::GUI::GLGizmosManager& gizmos);

	bool has_undo_snapshot() const;
	bool has_undo_snapshot(size_t time_to_load) const;
	bool has_redo_snapshot() const;
	bool undo(Slic3r::Model &model, const Slic3r::GUI::Selection &selection, Slic3r::GUI::GLGizmosManager &gizmos, const SnapshotData &snapshot_data, size_t jump_to_time);
	bool redo(Slic3r::Model &model, Slic3r::GUI::GLGizmosManager &gizmos, size_t jump_to_time);
	void release_least_recently_used();

	// Snapshot history (names with timestamps).
	const std::vector<Snapshot>& snapshots() const { return m_snapshots; }
	// Timestamp of the active snapshot.
	size_t active_snapshot_time() const { return m_active_snapshot_time; }
	// True if the active snapshot is the last one and the topmost application state was not captured yet.
	bool temp_snapshot_active() const { return m_snapshots.back().timestamp == m_active_snapshot_time && ! m_snapshots.back().is_topmost_captured(); }

	// Last selection that was serialized to / deserialized from this stack.
	const Selection& selection_deserialized() const { return m_selection; }

//protected:
	// Helpers used by the cereal (de)serialization hooks below; they store / restore a single
	// tracked object addressed by its ObjectID.
	template<typename T> ObjectID save_mutable_object(const T &object);
	template<typename T> ObjectID save_immutable_object(std::shared_ptr<const T> &object, bool optional);
	template<typename T> T* load_mutable_object(const Slic3r::ObjectID id);
	template<typename T> std::shared_ptr<const T> load_immutable_object(const Slic3r::ObjectID id, bool optional);
	template<typename T> void load_mutable_object(const Slic3r::ObjectID id, T &target);

#ifdef SLIC3R_UNDOREDO_DEBUG
	// Human readable dump of the whole stack (debug builds only).
	std::string format() const {
		std::string out = "Objects\n";
		for (const std::pair<const ObjectID, std::unique_ptr<ObjectHistoryBase>> &kvp : m_objects)
			out += std::string("ObjectID:") + std::to_string(kvp.first.id) + " " + kvp.second->format() + "\n";
		out += "Snapshots\n";
		for (const Snapshot &snapshot : m_snapshots) {
			if (snapshot.timestamp == m_active_snapshot_time)
				out += ">>> ";
			out += std::string("Name: \"") + snapshot.name + "\", timestamp: " + std::to_string(snapshot.timestamp) +
				", Model ID:" + ((snapshot.model_id == 0) ? "Invalid" : std::to_string(snapshot.model_id)) + "\n";
		}
		if (m_active_snapshot_time > m_snapshots.back().timestamp)
			out += ">>>\n";
		out += "Current time: " + std::to_string(m_current_time) + "\n";
		out += "Total memory occupied: " + std::to_string(this->memsize()) + "\n";
		return out;
	}
	void print() const {
		std::cout << "Undo / Redo stack" << std::endl;
		std::cout << this->format() << std::endl;
	}
#endif /* SLIC3R_UNDOREDO_DEBUG */


#ifndef NDEBUG
	// Debug-only consistency check of the snapshot list and all object histories.
	bool valid() const {
		assert(! m_snapshots.empty());
		assert(m_snapshots.back().is_topmost());
		auto it = std::lower_bound(m_snapshots.begin(), m_snapshots.end(), Snapshot(m_active_snapshot_time));
		assert(it != m_snapshots.begin() && it != m_snapshots.end() && it->timestamp == m_active_snapshot_time);
		assert(m_active_snapshot_time <= m_snapshots.back().timestamp);
		for (auto it = m_objects.begin(); it != m_objects.end(); ++ it)
			assert(it->second->valid());
		return true;
	}
#endif /* NDEBUG */

private:
	// Map an immutable object's raw pointer to a stable, temporary ObjectID.
	template<typename T> ObjectID immutable_object_id(const std::shared_ptr<const T> &ptr) {
		return this->immutable_object_id_impl((const void*)ptr.get());
	}
	ObjectID immutable_object_id_impl(const void *ptr) {
		auto it = m_shared_ptr_to_object_id.find(ptr);
		if (it == m_shared_ptr_to_object_id.end()) {
			// Allocate a new temporary ObjectID for this shared pointer.
			ObjectBase object_with_id;
			it = m_shared_ptr_to_object_id.insert(it, std::make_pair(ptr, object_with_id.id()));
		}
		return it->second;
	}
	void collect_garbage();

	// Maximum memory allowed to be occupied by the Undo / Redo stack. If the limit is exceeded,
	// least recently used snapshots will be released.
	size_t m_memory_limit;
	// Each individual object (Model, ModelObject, ModelInstance, ModelVolume, Selection, TriangleMesh)
	// is stored with its own history, referenced by the ObjectID. Immutable objects do not provide
	// their own IDs, therefore there are temporary IDs generated for them and stored to m_shared_ptr_to_object_id.
	std::map<ObjectID, std::unique_ptr<ObjectHistoryBase>> m_objects;
	std::map<const void*, ObjectID> m_shared_ptr_to_object_id;
	// Snapshot history (names with timestamps).
	std::vector<Snapshot> m_snapshots;
	// Timestamp of the active snapshot.
	size_t m_active_snapshot_time;
	// Logical time counter. m_current_time is being incremented with each snapshot taken.
	size_t m_current_time;
	// Last selection serialized or deserialized.
	Selection m_selection;
};
642
// Archive types used to (de)serialize the snapshots; the StackImpl is attached to the archive
// as cereal user data so the load / save hooks below can reach the stack.
using InputArchive = cereal::UserDataAdapter<StackImpl, cereal::BinaryInputArchive>;
using OutputArchive = cereal::UserDataAdapter<StackImpl, cereal::BinaryOutputArchive>;
645
646 } // namespace UndoRedo
647
648 class Model;
649 class ModelObject;
650 class ModelVolume;
651 class ModelInstance;
652 class ModelMaterial;
653 class DynamicPrintConfig;
654 class TriangleMesh;
655
656 } // namespace Slic3r
657
658 namespace cereal
659 {
// Let cereal know that there are load / save non-member functions declared for ModelObject*, ignore serialization of pointers triggering
// static assert, that cereal does not support serialization of raw pointers.
template <class Archive> struct specialize<Archive, Slic3r::Model*, cereal::specialization::non_member_load_save> {};
template <class Archive> struct specialize<Archive, Slic3r::ModelObject*, cereal::specialization::non_member_load_save> {};
template <class Archive> struct specialize<Archive, Slic3r::ModelVolume*, cereal::specialization::non_member_load_save> {};
template <class Archive> struct specialize<Archive, Slic3r::ModelInstance*, cereal::specialization::non_member_load_save> {};
template <class Archive> struct specialize<Archive, Slic3r::ModelMaterial*, cereal::specialization::non_member_load_save> {};
// The shared TriangleMesh is likewise routed through non-member load / save (stored as an immutable object).
template <class Archive> struct specialize<Archive, std::shared_ptr<Slic3r::TriangleMesh>, cereal::specialization::non_member_load_save> {};
668
// Store ObjectBase derived class onto the Undo / Redo stack as a separate object,
// store just the ObjectID to this stream.
template <class T> void save(BinaryOutputArchive& ar, T* const& ptr)
{
	// The StackImpl was attached to the archive as cereal user data (see InputArchive / OutputArchive).
	ar(cereal::get_user_data<Slic3r::UndoRedo::StackImpl>(ar).save_mutable_object<T>(*ptr));
}
675
676 // Load ObjectBase derived class from the Undo / Redo stack as a separate object
677 // based on the ObjectID loaded from this stream.
load(BinaryInputArchive & ar,T * & ptr)678 template <class T> void load(BinaryInputArchive& ar, T*& ptr)
679 {
680 Slic3r::UndoRedo::StackImpl& stack = cereal::get_user_data<Slic3r::UndoRedo::StackImpl>(ar);
681 size_t id;
682 ar(id);
683 ptr = stack.load_mutable_object<T>(Slic3r::ObjectID(id));
684 }
685
686 // Store ObjectBase derived class onto the Undo / Redo stack as a separate object,
687 // store just the ObjectID to this stream.
save(BinaryOutputArchive & ar,const std::unique_ptr<T> & ptr)688 template <class T> void save(BinaryOutputArchive &ar, const std::unique_ptr<T> &ptr)
689 {
690 ar(cereal::get_user_data<Slic3r::UndoRedo::StackImpl>(ar).save_mutable_object<T>(*ptr.get()));
691 }
692
693 // Load ObjectBase derived class from the Undo / Redo stack as a separate object
694 // based on the ObjectID loaded from this stream.
load(BinaryInputArchive & ar,std::unique_ptr<T> & ptr)695 template <class T> void load(BinaryInputArchive &ar, std::unique_ptr<T> &ptr)
696 {
697 Slic3r::UndoRedo::StackImpl& stack = cereal::get_user_data<Slic3r::UndoRedo::StackImpl>(ar);
698 size_t id;
699 ar(id);
700 ptr.reset(stack.load_mutable_object<T>(Slic3r::ObjectID(id)));
701 }
702
703 // Store ObjectBase derived class onto the Undo / Redo stack as a separate object,
704 // store just the ObjectID to this stream.
save_by_value(BinaryOutputArchive & ar,const T & cfg)705 template<class T> void save_by_value(BinaryOutputArchive& ar, const T &cfg)
706 {
707 ar(cereal::get_user_data<Slic3r::UndoRedo::StackImpl>(ar).save_mutable_object<T>(cfg));
708 }
709 // Load ObjectBase derived class from the Undo / Redo stack as a separate object
710 // based on the ObjectID loaded from this stream.
load_by_value(BinaryInputArchive & ar,T & cfg)711 template<class T> void load_by_value(BinaryInputArchive& ar, T &cfg)
712 {
713 Slic3r::UndoRedo::StackImpl& stack = cereal::get_user_data<Slic3r::UndoRedo::StackImpl>(ar);
714 size_t id;
715 ar(id);
716 stack.load_mutable_object<T>(Slic3r::ObjectID(id), cfg);
717 }
718
719 // Store ObjectBase derived class onto the Undo / Redo stack as a separate object,
720 // store just the ObjectID to this stream.
save(BinaryOutputArchive & ar,const std::shared_ptr<const T> & ptr)721 template <class T> void save(BinaryOutputArchive &ar, const std::shared_ptr<const T> &ptr)
722 {
723 ar(cereal::get_user_data<Slic3r::UndoRedo::StackImpl>(ar).save_immutable_object<T>(const_cast<std::shared_ptr<const T>&>(ptr), false));
724 }
save_optional(BinaryOutputArchive & ar,const std::shared_ptr<const T> & ptr)725 template <class T> void save_optional(BinaryOutputArchive &ar, const std::shared_ptr<const T> &ptr)
726 {
727 ar(cereal::get_user_data<Slic3r::UndoRedo::StackImpl>(ar).save_immutable_object<T>(const_cast<std::shared_ptr<const T>&>(ptr), true));
728 }
729
730 // Load ObjectBase derived class from the Undo / Redo stack as a separate object
731 // based on the ObjectID loaded from this stream.
load(BinaryInputArchive & ar,std::shared_ptr<const T> & ptr)732 template <class T> void load(BinaryInputArchive &ar, std::shared_ptr<const T> &ptr)
733 {
734 Slic3r::UndoRedo::StackImpl &stack = cereal::get_user_data<Slic3r::UndoRedo::StackImpl>(ar);
735 size_t id;
736 ar(id);
737 ptr = stack.load_immutable_object<T>(Slic3r::ObjectID(id), false);
738 }
load_optional(BinaryInputArchive & ar,std::shared_ptr<const T> & ptr)739 template <class T> void load_optional(BinaryInputArchive &ar, std::shared_ptr<const T> &ptr)
740 {
741 Slic3r::UndoRedo::StackImpl &stack = cereal::get_user_data<Slic3r::UndoRedo::StackImpl>(ar);
742 size_t id;
743 ar(id);
744 ptr = stack.load_immutable_object<T>(Slic3r::ObjectID(id), true);
745 }
746 }
747
748 #include <libslic3r/Model.hpp>
749 #include <libslic3r/TriangleMesh.hpp>
750 #include <slic3r/GUI/Selection.hpp>
751 #include <slic3r/GUI/Gizmos/GLGizmosManager.hpp>
752
753 namespace Slic3r {
754 namespace UndoRedo {
755
// Return the shared pointer to the immutable object, lazily deserializing it from the
// cached serialized string on first access. The deserialized instance is memoized in
// m_shared_object, so repeated calls are cheap.
template<typename T> std::shared_ptr<const T>& ImmutableObjectHistory<T>::shared_ptr(StackImpl &stack)
{
	if (m_shared_object.get() == nullptr && ! this->m_serialized.empty()) {
		// Deserialize the object.
		std::istringstream iss(m_serialized);
		{
			// Scope the archive so it is fully finished before the stream is torn down.
			Slic3r::UndoRedo::InputArchive archive(stack, iss);
			typedef typename std::remove_const<T>::type Type;
			std::unique_ptr<Type> mesh(new Type());
			archive(*mesh.get());
			m_shared_object = std::move(mesh);
		}
	}
	return m_shared_object;
}
771
// Serialize a mutable (ObjectBase derived) object into its per-object history on the
// Undo / Redo stack and return its ObjectID, which is what the caller writes into the
// parent archive stream instead of the object itself.
template<typename T> ObjectID StackImpl::save_mutable_object(const T &object)
{
	// First find or allocate a history stack for the ObjectID of this object instance.
	auto it_object_history = m_objects.find(object.id());
	if (it_object_history == m_objects.end())
		it_object_history = m_objects.insert(it_object_history, std::make_pair(object.id(), std::unique_ptr<MutableObjectHistory<T>>(new MutableObjectHistory<T>())));
	auto *object_history = static_cast<MutableObjectHistory<T>*>(it_object_history->second.get());
	bool needs_to_save = true;
	{
		// If the timestamp returned is non zero, then it is considered reliable.
		// The caller is supposed to serialize the timestamp first.
		uint64_t timestamp = object.timestamp();
		if (timestamp > 0)
			// If try_save_timestamp() succeeds, the history presumably reuses the last
			// serialized state for this timestamp and a fresh serialization is skipped.
			needs_to_save = ! object_history->try_save_timestamp(m_active_snapshot_time, m_current_time, timestamp);
	}
	if (needs_to_save) {
		// Serialize the object into a string.
		std::ostringstream oss;
		{
			Slic3r::UndoRedo::OutputArchive archive(*this, oss);
			archive(object);
		}
		object_history->save(m_active_snapshot_time, m_current_time, oss.str());
	}
	return object.id();
}
798
// Register an immutable object (held by shared_ptr) on the Undo / Redo stack and return
// the ObjectID to be written into the parent archive stream. The "optional" flag marks
// data that may be released under memory pressure (see release_least_recently_used()).
template<typename T> ObjectID StackImpl::save_immutable_object(std::shared_ptr<const T> &object, bool optional)
{
	// First allocate a temporary ObjectID for this pointer.
	ObjectID object_id = this->immutable_object_id(object);
	// and find or allocate a history stack for the ObjectID associated to this shared_ptr.
	auto it_object_history = m_objects.find(object_id);
	if (it_object_history == m_objects.end())
		it_object_history = m_objects.emplace_hint(it_object_history, object_id, std::unique_ptr<ImmutableObjectHistory<T>>(new ImmutableObjectHistory<T>(object, optional)));
	else
		// An object is expected to be consistently optional or non-optional across saves.
		assert(it_object_history->second.get()->is_optional() == optional);
	// Then save the interval.
	static_cast<ImmutableObjectHistory<T>*>(it_object_history->second.get())->save(m_active_snapshot_time, m_current_time);
	return object_id;
}
813
load_mutable_object(const Slic3r::ObjectID id)814 template<typename T> T* StackImpl::load_mutable_object(const Slic3r::ObjectID id)
815 {
816 T *target = new T();
817 this->load_mutable_object<T>(id, *target);
818 return target;
819 }
820
// Look up an immutable object by ObjectID and return its shared pointer, deserializing it
// on demand. If "optional" is set and the object is not present on the stack (it may have
// been released to save memory), an empty shared_ptr is returned instead of asserting.
template<typename T> std::shared_ptr<const T> StackImpl::load_immutable_object(const Slic3r::ObjectID id, bool optional)
{
	// First find a history stack for the ObjectID of this object instance.
	auto it_object_history = m_objects.find(id);
	assert(optional || it_object_history != m_objects.end());
	if (it_object_history == m_objects.end())
		return std::shared_ptr<const T>();
	auto *object_history = static_cast<ImmutableObjectHistory<T>*>(it_object_history->second.get());
	// The active snapshot time must be covered by this object's history intervals.
	assert(object_history->has_snapshot(m_active_snapshot_time));
	object_history->restore_optional();
	return object_history->shared_ptr(*this);
}
833
// Deserialize the state recorded at m_active_snapshot_time for the given ObjectID into an
// existing target object, restoring the object's original ObjectID as well.
template<typename T> void StackImpl::load_mutable_object(const Slic3r::ObjectID id, T &target)
{
	// First find a history stack for the ObjectID of this object instance.
	auto it_object_history = m_objects.find(id);
	assert(it_object_history != m_objects.end());
	auto *object_history = static_cast<const MutableObjectHistory<T>*>(it_object_history->second.get());
	// Then get the data associated with the object history and m_active_snapshot_time.
	std::istringstream iss(object_history->load(m_active_snapshot_time));
	Slic3r::UndoRedo::InputArchive archive(*this, iss);
	// Direct write to the private m_id member -- presumably StackImpl is befriended by
	// the ObjectBase hierarchy; confirm in libslic3r/ObjectID.hpp.
	target.m_id = id;
	archive(target);
}
846
847 // Store the current application state onto the Undo / Redo stack, remove all snapshots after m_active_snapshot_time.
// Store the current application state onto the Undo / Redo stack, remove all snapshots after m_active_snapshot_time.
// Captures the Model, the current Selection (as geometry_ids) and the gizmo manager state,
// then appends both the named snapshot and a placeholder "topmost" snapshot entry.
void StackImpl::take_snapshot(const std::string& snapshot_name, const Slic3r::Model& model, const Slic3r::GUI::Selection& selection, const Slic3r::GUI::GLGizmosManager& gizmos, const SnapshotData &snapshot_data)
{
	// Release old snapshot data.
	assert(m_active_snapshot_time <= m_current_time);
	for (auto &kvp : m_objects)
		kvp.second->release_after_timestamp(m_active_snapshot_time);
	{
		// Drop the "redo" tail of the snapshot list: everything at or after the active time.
		auto it = std::lower_bound(m_snapshots.begin(), m_snapshots.end(), Snapshot(m_active_snapshot_time));
		m_snapshots.erase(it, m_snapshots.end());
	}
	// Take new snapshots.
	this->save_mutable_object<Slic3r::Model>(model);
	// Capture the current selection as volume / instance geometry_ids.
	m_selection.volumes_and_instances.clear();
	m_selection.volumes_and_instances.reserve(selection.get_volume_idxs().size());
	m_selection.mode = selection.get_mode();
	for (unsigned int volume_idx : selection.get_volume_idxs())
		m_selection.volumes_and_instances.emplace_back(selection.get_volume(volume_idx)->geometry_id);
	this->save_mutable_object<Selection>(m_selection);
	this->save_mutable_object<Slic3r::GUI::GLGizmosManager>(gizmos);
	// Save the snapshot info. Note the post-increment: the snapshot is stamped with the
	// pre-increment time, and m_active_snapshot_time then points one past it.
	m_snapshots.emplace_back(snapshot_name, m_current_time ++, model.id().id, snapshot_data);
	m_active_snapshot_time = m_current_time;
	// Save snapshot info of the last "current" aka "top most" state, that is only being serialized
	// if undoing an action. Such a snapshot has an invalid Model ID assigned if it was not taken yet.
	m_snapshots.emplace_back(topmost_snapshot_name, m_active_snapshot_time, 0, snapshot_data);
	// Release empty objects from the history.
	this->collect_garbage();
	assert(this->valid());
#ifdef SLIC3R_UNDOREDO_DEBUG
	std::cout << "After snapshot" << std::endl;
	this->print();
#endif /* SLIC3R_UNDOREDO_DEBUG */
}
881
load_snapshot(size_t timestamp,Slic3r::Model & model,Slic3r::GUI::GLGizmosManager & gizmos)882 void StackImpl::load_snapshot(size_t timestamp, Slic3r::Model& model, Slic3r::GUI::GLGizmosManager& gizmos)
883 {
884 // Find the snapshot by time. It must exist.
885 const auto it_snapshot = std::lower_bound(m_snapshots.begin(), m_snapshots.end(), Snapshot(timestamp));
886 if (it_snapshot == m_snapshots.end() || it_snapshot->timestamp != timestamp)
887 throw Slic3r::RuntimeError((boost::format("Snapshot with timestamp %1% does not exist") % timestamp).str());
888
889 m_active_snapshot_time = timestamp;
890 model.clear_objects();
891 model.clear_materials();
892 this->load_mutable_object<Slic3r::Model>(ObjectID(it_snapshot->model_id), model);
893 model.update_links_bottom_up_recursive();
894 m_selection.volumes_and_instances.clear();
895 this->load_mutable_object<Selection>(m_selection.id(), m_selection);
896 //gizmos.reset_all_states(); FIXME: is this really necessary? It is quite unpleasant for the gizmo undo/redo substack
897 this->load_mutable_object<Slic3r::GUI::GLGizmosManager>(gizmos.id(), gizmos);
898 // Sort the volumes so that we may use binary search.
899 std::sort(m_selection.volumes_and_instances.begin(), m_selection.volumes_and_instances.end());
900 this->m_active_snapshot_time = timestamp;
901 assert(this->valid());
902 }
903
has_undo_snapshot() const904 bool StackImpl::has_undo_snapshot() const
905 {
906 assert(this->valid());
907 auto it = std::lower_bound(m_snapshots.begin(), m_snapshots.end(), Snapshot(m_active_snapshot_time));
908 return -- it != m_snapshots.begin();
909 }
910
has_undo_snapshot(size_t time_to_load) const911 bool StackImpl::has_undo_snapshot(size_t time_to_load) const
912 {
913 return time_to_load < m_active_snapshot_time && std::binary_search(m_snapshots.begin(), m_snapshots.end(), Snapshot(time_to_load));
914 }
915
has_redo_snapshot() const916 bool StackImpl::has_redo_snapshot() const
917 {
918 assert(this->valid());
919 auto it = std::lower_bound(m_snapshots.begin(), m_snapshots.end(), Snapshot(m_active_snapshot_time));
920 return ++ it != m_snapshots.end();
921 }
922
// Roll the application state back to the snapshot at time_to_load, or to the snapshot just
// before the active one when time_to_load == SIZE_MAX. If the current state is only a
// temporary "topmost" placeholder, it is captured first so the undo can be redone.
// Returns false if there is nothing to undo to.
bool StackImpl::undo(Slic3r::Model &model, const Slic3r::GUI::Selection &selection, Slic3r::GUI::GLGizmosManager &gizmos, const SnapshotData &snapshot_data, size_t time_to_load)
{
	assert(this->valid());
	if (time_to_load == SIZE_MAX) {
		// No explicit target given: pick the snapshot preceding the active one.
		auto it_current = std::lower_bound(m_snapshots.begin(), m_snapshots.end(), Snapshot(m_active_snapshot_time));
		if (-- it_current == m_snapshots.begin())
			return false;
		time_to_load = it_current->timestamp;
	}
	assert(time_to_load < m_active_snapshot_time);
	assert(std::binary_search(m_snapshots.begin(), m_snapshots.end(), Snapshot(time_to_load)));
	bool new_snapshot_taken = false;
	if (m_active_snapshot_time == m_snapshots.back().timestamp && ! m_snapshots.back().is_topmost_captured()) {
		// The current state is temporary. The current state needs to be captured to be redoable.
		this->take_snapshot(topmost_snapshot_name, model, selection, gizmos, snapshot_data);
		// The line above entered another topmost_snapshot_name.
		assert(m_snapshots.back().is_topmost());
		assert(! m_snapshots.back().is_topmost_captured());
		// Pop it back, it is not needed as there is now a captured topmost state.
		m_snapshots.pop_back();
		// current_time was extended, but it should not cause any harm. Resetting it back may complicate the logic unnecessarily.
		//-- m_current_time;
		assert(m_snapshots.back().is_topmost());
		assert(m_snapshots.back().is_topmost_captured());
		new_snapshot_taken = true;
	}
	this->load_snapshot(time_to_load, model, gizmos);
	if (new_snapshot_taken) {
		// Release old snapshots if the memory allocated due to capturing the top most state is excessive.
		// Don't release the snapshots here, release them first after the scene and background processing gets updated, as this will release some references
		// to the shared TriangleMeshes.
		//this->release_least_recently_used();
	}
#ifdef SLIC3R_UNDOREDO_DEBUG
	std::cout << "After undo" << std::endl;
	this->print();
#endif /* SLIC3R_UNDOREDO_DEBUG */
	return true;
}
962
// Roll the application state forward to the snapshot at time_to_load, or to the snapshot
// just after the active one when time_to_load == SIZE_MAX.
// Returns false if there is nothing to redo to.
bool StackImpl::redo(Slic3r::Model& model, Slic3r::GUI::GLGizmosManager& gizmos, size_t time_to_load)
{
	assert(this->valid());
	if (time_to_load == SIZE_MAX) {
		// No explicit target given: pick the snapshot following the active one.
		auto it_current = std::lower_bound(m_snapshots.begin(), m_snapshots.end(), Snapshot(m_active_snapshot_time));
		if (++ it_current == m_snapshots.end())
			return false;
		time_to_load = it_current->timestamp;
	}
	assert(time_to_load > m_active_snapshot_time);
	assert(std::binary_search(m_snapshots.begin(), m_snapshots.end(), Snapshot(time_to_load)));
	this->load_snapshot(time_to_load, model, gizmos);
#ifdef SLIC3R_UNDOREDO_DEBUG
	std::cout << "After redo" << std::endl;
	this->print();
#endif /* SLIC3R_UNDOREDO_DEBUG */
	return true;
}
981
collect_garbage()982 void StackImpl::collect_garbage()
983 {
984 // Purge objects with empty histories.
985 for (auto it = m_objects.begin(); it != m_objects.end();) {
986 if (it->second->empty()) {
987 if (it->second->immutable_object_ptr() != nullptr)
988 // Release the immutable object from the ptr to ObjectID map.
989 m_shared_ptr_to_object_id.erase(it->second->immutable_object_ptr());
990 it = m_objects.erase(it);
991 } else
992 ++ it;
993 }
994 }
995
// Reduce the Undo / Redo stack memory footprint below m_memory_limit.
// Strategy: first drop optional immutable data (which can be regenerated / reloaded),
// then drop whole snapshots from the old end while at least three snapshots remain.
void StackImpl::release_least_recently_used()
{
	assert(this->valid());
	size_t current_memsize = this->memsize();
#ifdef SLIC3R_UNDOREDO_DEBUG
	bool released = false;
#endif
	// First try to release the optional immutable data (for example the convex hulls),
	// or the shared vertices of triangle meshes.
	for (auto it = m_objects.begin(); current_memsize > m_memory_limit && it != m_objects.end();) {
		// Remember the raw pointer before release_optional(), it is needed for map cleanup below.
		const void *ptr = it->second->immutable_object_ptr();
		size_t mem_released = it->second->release_optional();
		if (it->second->empty()) {
			if (ptr != nullptr)
				// Release the immutable object from the ptr to ObjectID map.
				m_shared_ptr_to_object_id.erase(ptr);
			mem_released += it->second->memsize();
			it = m_objects.erase(it);
		} else
			++ it;
		// Clamp the running total so a bookkeeping mismatch cannot underflow the size_t.
		assert(current_memsize >= mem_released);
		if (current_memsize >= mem_released)
			current_memsize -= mem_released;
		else
			current_memsize = 0;
	}
	// Then release whole snapshots while at least three remain on the stack.
	while (current_memsize > m_memory_limit && m_snapshots.size() >= 3) {
		// From which side to remove a snapshot?
		assert(m_snapshots.front().timestamp < m_active_snapshot_time);
		size_t mem_released = 0;
		if (m_snapshots[1].timestamp == m_active_snapshot_time) {
			// Remove the last snapshot.
#if 0
			for (auto it = m_objects.begin(); it != m_objects.end();) {
				mem_released += it->second->release_after_timestamp(m_snapshots.back().timestamp);
				if (it->second->empty()) {
					if (it->second->immutable_object_ptr() != nullptr)
						// Release the immutable object from the ptr to ObjectID map.
						m_shared_ptr_to_object_id.erase(it->second->immutable_object_ptr());
					mem_released += it->second->memsize();
					it = m_objects.erase(it);
				} else
					++ it;
			}
			m_snapshots.pop_back();
			m_snapshots.back().name = topmost_snapshot_name;
#else
			// Rather don't release the last snapshot as it will be very confusing to the user
			// as of why he cannot jump to the top most state. The Undo / Redo stack maximum size
			// should be set low enough to accomodate for the top most snapshot.
			break;
#endif
		} else {
			// Remove the first snapshot.
			for (auto it = m_objects.begin(); it != m_objects.end();) {
				mem_released += it->second->release_before_timestamp(m_snapshots[1].timestamp);
				if (it->second->empty()) {
					if (it->second->immutable_object_ptr() != nullptr)
						// Release the immutable object from the ptr to ObjectID map.
						m_shared_ptr_to_object_id.erase(it->second->immutable_object_ptr());
					mem_released += it->second->memsize();
					it = m_objects.erase(it);
				} else
					++ it;
			}
			m_snapshots.erase(m_snapshots.begin());
		}
		// Same clamped accounting as in the loop above.
		assert(current_memsize >= mem_released);
		if (current_memsize >= mem_released)
			current_memsize -= mem_released;
		else
			current_memsize = 0;
#ifdef SLIC3R_UNDOREDO_DEBUG
		released = true;
#endif
	}
	assert(this->valid());
#ifdef SLIC3R_UNDOREDO_DEBUG
	std::cout << "After release_least_recently_used" << std::endl;
	this->print();
#endif /* SLIC3R_UNDOREDO_DEBUG */
}
1078
1079 // Wrappers of the private implementation.
// Wrappers of the private implementation: the public Stack class forwards every call
// to the pimpl'd StackImpl, keeping StackImpl out of the public header.
Stack::Stack() : pimpl(new StackImpl()) {}
Stack::~Stack() {}
void Stack::clear() { pimpl->clear(); }
bool Stack::empty() const { return pimpl->empty(); }

// Memory budget management of the Undo / Redo stack.
void Stack::set_memory_limit(size_t memsize) { pimpl->set_memory_limit(memsize); }
size_t Stack::get_memory_limit() const { return pimpl->get_memory_limit(); }
size_t Stack::memsize() const { return pimpl->memsize(); }
void Stack::release_least_recently_used() { pimpl->release_least_recently_used(); }
// Snapshot capture and undo / redo navigation.
void Stack::take_snapshot(const std::string& snapshot_name, const Slic3r::Model& model, const Slic3r::GUI::Selection& selection, const Slic3r::GUI::GLGizmosManager& gizmos, const SnapshotData &snapshot_data)
{ pimpl->take_snapshot(snapshot_name, model, selection, gizmos, snapshot_data); }
bool Stack::has_undo_snapshot() const { return pimpl->has_undo_snapshot(); }
bool Stack::has_undo_snapshot(size_t time_to_load) const { return pimpl->has_undo_snapshot(time_to_load); }
bool Stack::has_redo_snapshot() const { return pimpl->has_redo_snapshot(); }
bool Stack::undo(Slic3r::Model& model, const Slic3r::GUI::Selection& selection, Slic3r::GUI::GLGizmosManager& gizmos, const SnapshotData &snapshot_data, size_t time_to_load)
{ return pimpl->undo(model, selection, gizmos, snapshot_data, time_to_load); }
bool Stack::redo(Slic3r::Model& model, Slic3r::GUI::GLGizmosManager& gizmos, size_t time_to_load) { return pimpl->redo(model, gizmos, time_to_load); }
const Selection& Stack::selection_deserialized() const { return pimpl->selection_deserialized(); }

// Introspection of the snapshot list for the UI.
const std::vector<Snapshot>& Stack::snapshots() const { return pimpl->snapshots(); }
size_t Stack::active_snapshot_time() const { return pimpl->active_snapshot_time(); }
bool Stack::temp_snapshot_active() const { return pimpl->temp_snapshot_active(); }
1102
1103 } // namespace UndoRedo
1104 } // namespace Slic3r
1105
1106
1107 //FIXME we should have unit tests for testing serialization of basic types as DynamicPrintConfig.
1108 #if 0
1109 #include "libslic3r/Config.hpp"
1110 #include "libslic3r/PrintConfig.hpp"
1111 namespace Slic3r {
1112 bool test_dynamic_print_config_serialization() {
1113 FullPrintConfig full_print_config;
1114 DynamicPrintConfig cfg;
1115 cfg.apply(full_print_config, false);
1116
1117 std::string serialized;
1118 try {
1119 std::ostringstream ss;
1120 cereal::BinaryOutputArchive oarchive(ss);
1121 oarchive(cfg);
1122 serialized = ss.str();
1123 } catch (std::runtime_error e) {
1124 e.what();
1125 }
1126
1127 DynamicPrintConfig cfg2;
1128 try {
1129 std::stringstream ss(serialized);
1130 cereal::BinaryInputArchive iarchive(ss);
1131 iarchive(cfg2);
1132 } catch (std::runtime_error e) {
1133 e.what();
1134 }
1135
1136 if (cfg == cfg2) {
1137 printf("Yes!\n");
1138 return true;
1139 }
1140 printf("No!\n");
1141 return false;
1142 }
1143 } // namespace Slic3r
1144 #endif
1145