1 /*
2  * This program is free software; you can redistribute it and/or
3  * modify it under the terms of the GNU General Public License
4  * as published by the Free Software Foundation; either version 2
5  * of the License, or (at your option) any later version.
6  *
7  * This program is distributed in the hope that it will be useful,
8  * but WITHOUT ANY WARRANTY; without even the implied warranty of
9  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
10  * GNU General Public License for more details.
11  *
12  * You should have received a copy of the GNU General Public License
13  * along with this program; if not, write to the Free Software Foundation,
14  * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
15  */
16 
17 /** \file
18  * \ingroup bke
19  *
20  * Used by ED_undo.h, internal implementation.
21  */
22 
23 #include <string.h>
24 
25 #include "CLG_log.h"
26 
27 #include "BLI_listbase.h"
28 #include "BLI_string.h"
29 #include "BLI_sys_types.h"
30 #include "BLI_utildefines.h"
31 
32 #include "BLT_translation.h"
33 
34 #include "DNA_listBase.h"
35 #include "DNA_windowmanager_types.h"
36 
37 #include "BKE_context.h"
38 #include "BKE_global.h"
39 #include "BKE_lib_override.h"
40 #include "BKE_main.h"
41 #include "BKE_undo_system.h"
42 
43 #include "MEM_guardedalloc.h"
44 
45 #define undo_stack _wm_undo_stack_disallow /* pass in as a variable always. */
46 
47 /** Odd requirement of Blender that we always keep a memfile undo in the stack. */
48 #define WITH_GLOBAL_UNDO_KEEP_ONE
49 
50 /** Make sure all ID's created at the point we add an undo step that uses ID's. */
51 #define WITH_GLOBAL_UNDO_ENSURE_UPDATED
52 
53 /**
54  * Make sure we don't apply edits on top of a newer memfile state, see: T56163.
55  * \note Keep an eye on this, could solve differently.
56  */
57 #define WITH_GLOBAL_UNDO_CORRECT_ORDER
58 
59 /** We only need this locally. */
60 static CLG_LogRef LOG = {"bke.undosys"};
61 
62 /* -------------------------------------------------------------------- */
63 /** \name Internal Nested Undo Checks
64  *
65  * Make sure we're not running undo operations from 'step_encode', 'step_decode' callbacks.
66  * bugs caused by this situation aren't _that_ hard to spot but aren't always so obvious.
67  * Best we have a check which shows the problem immediately.
68  *
69  * \{ */
70 #define WITH_NESTED_UNDO_CHECK
71 
72 #ifdef WITH_NESTED_UNDO_CHECK
73 static bool g_undo_callback_running = false;
74 #  define UNDO_NESTED_ASSERT(state) BLI_assert(g_undo_callback_running == state)
75 #  define UNDO_NESTED_CHECK_BEGIN \
76     { \
77       UNDO_NESTED_ASSERT(false); \
78       g_undo_callback_running = true; \
79     } \
80     ((void)0)
81 #  define UNDO_NESTED_CHECK_END \
82     { \
83       UNDO_NESTED_ASSERT(true); \
84       g_undo_callback_running = false; \
85     } \
86     ((void)0)
87 #else
88 #  define UNDO_NESTED_ASSERT(state) ((void)0)
89 #  define UNDO_NESTED_CHECK_BEGIN ((void)0)
90 #  define UNDO_NESTED_CHECK_END ((void)0)
91 #endif
92 /** \} */
93 
94 /* -------------------------------------------------------------------- */
95 /** \name Public Undo Types
96  *
97  * Unfortunately we need this for a handful of places.
98  */
99 const UndoType *BKE_UNDOSYS_TYPE_IMAGE = NULL;
100 const UndoType *BKE_UNDOSYS_TYPE_MEMFILE = NULL;
101 const UndoType *BKE_UNDOSYS_TYPE_PAINTCURVE = NULL;
102 const UndoType *BKE_UNDOSYS_TYPE_PARTICLE = NULL;
103 const UndoType *BKE_UNDOSYS_TYPE_SCULPT = NULL;
104 const UndoType *BKE_UNDOSYS_TYPE_TEXT = NULL;
105 /** \} */
106 
107 /* UndoType */
108 
109 static ListBase g_undo_types = {NULL, NULL};
110 
BKE_undosys_type_from_context(bContext * C)111 static const UndoType *BKE_undosys_type_from_context(bContext *C)
112 {
113   LISTBASE_FOREACH (const UndoType *, ut, &g_undo_types) {
114     /* No poll means we don't check context. */
115     if (ut->poll && ut->poll(C)) {
116       return ut;
117     }
118   }
119   return NULL;
120 }
121 
122 /* -------------------------------------------------------------------- */
123 /** \name Internal Callback Wrappers
124  *
125  * #UndoRefID is simply a way to avoid in-lining name copy and lookups,
126  * since it's easy to forget a single case when done inline (crashing in some cases).
127  *
128  * \{ */
129 
/** Convert an ID pointer reference into a name reference for safe storage. */
static void undosys_id_ref_store(void *UNUSED(user_data), UndoRefID *id_ref)
{
  BLI_assert(id_ref->name[0] == '\0');
  if (id_ref->ptr == NULL) {
    return;
  }
  BLI_strncpy(id_ref->name, id_ref->ptr->name, sizeof(id_ref->name));
  /* Not strictly required, clearing prevents stale pointer access. */
  id_ref->ptr = NULL;
}
139 
/** Resolve a stored name reference back into an ID pointer (local IDs only). */
static void undosys_id_ref_resolve(void *user_data, UndoRefID *id_ref)
{
  /* Linear lookup could be optimized; acceptable since it only runs on undo access. */
  Main *bmain = user_data;
  ListBase *lb = which_libbase(bmain, GS(id_ref->name));
  for (ID *id = lb->first; id != NULL; id = id->next) {
    if ((id->lib == NULL) && STREQ(id_ref->name, id->name)) {
      id_ref->ptr = id;
      break;
    }
  }
}
153 
undosys_step_encode(bContext * C,Main * bmain,UndoStack * ustack,UndoStep * us)154 static bool undosys_step_encode(bContext *C, Main *bmain, UndoStack *ustack, UndoStep *us)
155 {
156   CLOG_INFO(&LOG, 2, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);
157   UNDO_NESTED_CHECK_BEGIN;
158   bool ok = us->type->step_encode(C, bmain, us);
159   UNDO_NESTED_CHECK_END;
160   if (ok) {
161     if (us->type->step_foreach_ID_ref != NULL) {
162       /* Don't use from context yet because sometimes context is fake and
163        * not all members are filled in. */
164       us->type->step_foreach_ID_ref(us, undosys_id_ref_store, bmain);
165     }
166 
167 #ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER
168     if (us->type == BKE_UNDOSYS_TYPE_MEMFILE) {
169       ustack->step_active_memfile = us;
170     }
171 #endif
172   }
173   if (ok == false) {
174     CLOG_INFO(&LOG, 2, "encode callback didn't create undo step");
175   }
176   return ok;
177 }
178 
/**
 * Load the state of \a us by calling its type's 'step_decode' callback.
 *
 * Before decoding, any ID references stored in the step are resolved against
 * the main database. For non-memfile steps this may first decode an earlier
 * memfile step so the references resolve correctly (see: T56163).
 *
 * \param dir: Step direction: -1 when undoing, 1 when redoing.
 * \param is_final: True when this is the final target step of the operation
 * (false for intermediate steps decoded along the way).
 */
static void undosys_step_decode(
    bContext *C, Main *bmain, UndoStack *ustack, UndoStep *us, int dir, bool is_final)
{
  CLOG_INFO(&LOG, 2, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);

  if (us->type->step_foreach_ID_ref) {
#ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER
    if (us->type != BKE_UNDOSYS_TYPE_MEMFILE) {
      /* Find the nearest memfile step below 'us' in the stack. */
      for (UndoStep *us_iter = us->prev; us_iter; us_iter = us_iter->prev) {
        if (us_iter->type == BKE_UNDOSYS_TYPE_MEMFILE) {
          if (us_iter == ustack->step_active_memfile) {
            /* Common case, we're already using the last memfile state. */
          }
          else {
            /* Load the previous memfile state so any ID's referenced in this
             * undo step will be correctly resolved, see: T56163. */
            undosys_step_decode(C, bmain, ustack, us_iter, dir, false);
            /* May have been freed on memfile read. */
            bmain = G_MAIN;
          }
          break;
        }
      }
    }
#endif
    /* Don't use from context yet because sometimes context is fake and
     * not all members are filled in. */
    us->type->step_foreach_ID_ref(us, undosys_id_ref_resolve, bmain);
  }

  UNDO_NESTED_CHECK_BEGIN;
  us->type->step_decode(C, bmain, us, dir, is_final);
  UNDO_NESTED_CHECK_END;

#ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER
  /* Record which memfile state is now applied so later decodes can skip reloading it. */
  if (us->type == BKE_UNDOSYS_TYPE_MEMFILE) {
    ustack->step_active_memfile = us;
  }
#endif
}
219 
/**
 * Free \a us (via its type's 'step_free' callback) and remove it from the stack.
 * Also clears 'step_active_memfile' when it pointed at the freed step.
 */
static void undosys_step_free_and_unlink(UndoStack *ustack, UndoStep *us)
{
  CLOG_INFO(&LOG, 2, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);
  UNDO_NESTED_CHECK_BEGIN;
  us->type->step_free(us);
  UNDO_NESTED_CHECK_END;

#ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER
  /* Clear the cached memfile pointer *before* freeing 'us': comparing
   * against a freed pointer value is undefined behavior in C. */
  if (ustack->step_active_memfile == us) {
    ustack->step_active_memfile = NULL;
  }
#endif

  BLI_remlink(&ustack->steps, us);
  MEM_freeN(us);
}
236 
237 /** \} */
238 
239 /* -------------------------------------------------------------------- */
240 /** \name Undo Stack
241  * \{ */
242 
#ifndef NDEBUG
/**
 * Debug-build sanity check: the active step (when set) must be a member of a
 * non-empty stack; optionally assert the stack is non-empty regardless.
 */
static void undosys_stack_validate(UndoStack *ustack, bool expect_non_empty)
{
  if (ustack->step_active != NULL) {
    BLI_assert(!BLI_listbase_is_empty(&ustack->steps));
    BLI_assert(BLI_findindex(&ustack->steps, ustack->step_active) != -1);
  }
  if (expect_non_empty) {
    BLI_assert(!BLI_listbase_is_empty(&ustack->steps));
  }
}
#else
/* Release builds: validation compiles to a no-op. */
static void undosys_stack_validate(UndoStack *ustack, bool expect_non_empty)
{
  UNUSED_VARS(ustack, expect_non_empty);
}
#endif
260 
BKE_undosys_stack_create(void)261 UndoStack *BKE_undosys_stack_create(void)
262 {
263   UndoStack *ustack = MEM_callocN(sizeof(UndoStack), __func__);
264   return ustack;
265 }
266 
/** Free all undo steps in \a ustack, then the stack itself. */
void BKE_undosys_stack_destroy(UndoStack *ustack)
{
  BKE_undosys_stack_clear(ustack);
  MEM_freeN(ustack);
}
272 
/** Free every undo step, leaving an empty stack with no active step. */
void BKE_undosys_stack_clear(UndoStack *ustack)
{
  UNDO_NESTED_ASSERT(false);
  CLOG_INFO(&LOG, 1, "steps=%d", BLI_listbase_count(&ustack->steps));
  /* Free from the newest step backwards (unlinking updates 'steps.last'). */
  while (ustack->steps.last != NULL) {
    undosys_step_free_and_unlink(ustack, ustack->steps.last);
  }
  BLI_listbase_clear(&ustack->steps);
  ustack->step_active = NULL;
}
284 
/** Remove the active step and every step after it (drops the redo branch). */
void BKE_undosys_stack_clear_active(UndoStack *ustack)
{
  UndoStep *us = ustack->step_active;
  if (us == NULL) {
    return;
  }

  /* The previous step becomes the new active state. */
  ustack->step_active = us->prev;
  const bool is_not_empty = (ustack->step_active != NULL);

  while (ustack->steps.last != ustack->step_active) {
    undosys_step_free_and_unlink(ustack, ustack->steps.last);
    undosys_stack_validate(ustack, is_not_empty);
  }
}
301 
302 /* Caller is responsible for handling active. */
undosys_stack_clear_all_last(UndoStack * ustack,UndoStep * us)303 static void undosys_stack_clear_all_last(UndoStack *ustack, UndoStep *us)
304 {
305   if (us) {
306     bool is_not_empty = true;
307     UndoStep *us_iter;
308     do {
309       us_iter = ustack->steps.last;
310       BLI_assert(us_iter != ustack->step_active);
311       undosys_step_free_and_unlink(ustack, us_iter);
312       undosys_stack_validate(ustack, is_not_empty);
313     } while ((us != us_iter));
314   }
315 }
316 
/**
 * Free \a us and every step before it, iterating from the oldest forwards,
 * optionally keeping \a us_exclude (which is never freed).
 */
static void undosys_stack_clear_all_first(UndoStack *ustack, UndoStep *us, UndoStep *us_exclude)
{
  /* When the end of the range is the excluded step, stop one step earlier. */
  if (us && us == us_exclude) {
    us = us->prev;
  }

  if (us) {
    bool is_not_empty = true;
    UndoStep *us_iter;
    do {
      us_iter = ustack->steps.first;
      /* Never free the excluded step: skip over it to the next one. */
      if (us_iter == us_exclude) {
        us_iter = us_iter->next;
      }
      BLI_assert(us_iter != ustack->step_active);
      undosys_step_free_and_unlink(ustack, us_iter);
      undosys_stack_validate(ustack, is_not_empty);
    } while ((us != us_iter));
  }
}
337 
/**
 * Push a memfile (global) undo step for \a bmain.
 * A temporary context is used since the memfile step only needs the main database.
 * \return true when the step was created.
 */
static bool undosys_stack_push_main(UndoStack *ustack, const char *name, struct Main *bmain)
{
  UNDO_NESTED_ASSERT(false);
  BLI_assert(ustack->step_init == NULL);
  CLOG_INFO(&LOG, 1, "'%s'", name);
  bContext *C_temp = CTX_create();
  CTX_data_main_set(C_temp, bmain);
  bool ok = BKE_undosys_step_push_with_type(ustack, C_temp, name, BKE_UNDOSYS_TYPE_MEMFILE);
  CTX_free(C_temp);
  return ok;
}
349 
/** Push the initial "Original" memfile undo step for a newly loaded file. */
void BKE_undosys_stack_init_from_main(UndoStack *ustack, struct Main *bmain)
{
  UNDO_NESTED_ASSERT(false);
  undosys_stack_push_main(ustack, IFACE_("Original"), bmain);
}
355 
356 /* called after 'BKE_undosys_stack_init_from_main' */
BKE_undosys_stack_init_from_context(UndoStack * ustack,bContext * C)357 void BKE_undosys_stack_init_from_context(UndoStack *ustack, bContext *C)
358 {
359   const UndoType *ut = BKE_undosys_type_from_context(C);
360   if ((ut != NULL) && (ut != BKE_UNDOSYS_TYPE_MEMFILE)) {
361     BKE_undosys_step_push_with_type(ustack, C, IFACE_("Original Mode"), ut);
362   }
363 }
364 
365 /* name optional */
BKE_undosys_stack_has_undo(UndoStack * ustack,const char * name)366 bool BKE_undosys_stack_has_undo(UndoStack *ustack, const char *name)
367 {
368   if (name) {
369     UndoStep *us = BLI_rfindstring(&ustack->steps, name, offsetof(UndoStep, name));
370     return us && us->prev;
371   }
372 
373   return !BLI_listbase_is_empty(&ustack->steps);
374 }
375 
BKE_undosys_stack_active_with_type(UndoStack * ustack,const UndoType * ut)376 UndoStep *BKE_undosys_stack_active_with_type(UndoStack *ustack, const UndoType *ut)
377 {
378   UndoStep *us = ustack->step_active;
379   while (us && (us->type != ut)) {
380     us = us->prev;
381   }
382   return us;
383 }
384 
BKE_undosys_stack_init_or_active_with_type(UndoStack * ustack,const UndoType * ut)385 UndoStep *BKE_undosys_stack_init_or_active_with_type(UndoStack *ustack, const UndoType *ut)
386 {
387   UNDO_NESTED_ASSERT(false);
388   CLOG_INFO(&LOG, 1, "type='%s'", ut->name);
389   if (ustack->step_init && (ustack->step_init->type == ut)) {
390     return ustack->step_init;
391   }
392   return BKE_undosys_stack_active_with_type(ustack, ut);
393 }
394 
395 /**
396  * \param steps: Limit the number of undo steps.
397  * \param memory_limit: Limit the amount of memory used by the undo stack.
398  */
BKE_undosys_stack_limit_steps_and_memory(UndoStack * ustack,int steps,size_t memory_limit)399 void BKE_undosys_stack_limit_steps_and_memory(UndoStack *ustack, int steps, size_t memory_limit)
400 {
401   UNDO_NESTED_ASSERT(false);
402   if ((steps == -1) && (memory_limit != 0)) {
403     return;
404   }
405 
406   CLOG_INFO(&LOG, 1, "steps=%d, memory_limit=%zu", steps, memory_limit);
407   UndoStep *us;
408   UndoStep *us_exclude = NULL;
409   /* keep at least two (original + other) */
410   size_t data_size_all = 0;
411   size_t us_count = 0;
412   for (us = ustack->steps.last; us && us->prev; us = us->prev) {
413     if (memory_limit) {
414       data_size_all += us->data_size;
415       if (data_size_all > memory_limit) {
416         break;
417       }
418     }
419     if (steps != -1) {
420       if (us_count == steps) {
421         break;
422       }
423       if (us->skip == false) {
424         us_count += 1;
425       }
426     }
427   }
428 
429   if (us) {
430 #ifdef WITH_GLOBAL_UNDO_KEEP_ONE
431     /* Hack, we need to keep at least one BKE_UNDOSYS_TYPE_MEMFILE. */
432     if (us->type != BKE_UNDOSYS_TYPE_MEMFILE) {
433       us_exclude = us->prev;
434       while (us_exclude && us_exclude->type != BKE_UNDOSYS_TYPE_MEMFILE) {
435         us_exclude = us_exclude->prev;
436       }
437       /* Once this is outside the given number of 'steps', undoing onto this state
438        * may skip past many undo steps which is confusing, instead,
439        * disallow stepping onto this state entirely. */
440       if (us_exclude) {
441         us_exclude->skip = true;
442       }
443     }
444 #endif
445     /* Free from first to last, free functions may update de-duplication info
446      * (see #MemFileUndoStep). */
447     undosys_stack_clear_all_first(ustack, us->prev, us_exclude);
448   }
449 }
450 
451 /** \} */
452 
BKE_undosys_step_push_init_with_type(UndoStack * ustack,bContext * C,const char * name,const UndoType * ut)453 UndoStep *BKE_undosys_step_push_init_with_type(UndoStack *ustack,
454                                                bContext *C,
455                                                const char *name,
456                                                const UndoType *ut)
457 {
458   UNDO_NESTED_ASSERT(false);
459   /* We could detect and clean this up (but it should never happen!). */
460   BLI_assert(ustack->step_init == NULL);
461   if (ut->step_encode_init) {
462     undosys_stack_validate(ustack, false);
463 
464     if (ustack->step_active) {
465       undosys_stack_clear_all_last(ustack, ustack->step_active->next);
466     }
467 
468     UndoStep *us = MEM_callocN(ut->step_size, __func__);
469     if (name != NULL) {
470       BLI_strncpy(us->name, name, sizeof(us->name));
471     }
472     us->type = ut;
473     ustack->step_init = us;
474     CLOG_INFO(&LOG, 1, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);
475     ut->step_encode_init(C, us);
476     undosys_stack_validate(ustack, false);
477     return us;
478   }
479 
480   return NULL;
481 }
482 
BKE_undosys_step_push_init(UndoStack * ustack,bContext * C,const char * name)483 UndoStep *BKE_undosys_step_push_init(UndoStack *ustack, bContext *C, const char *name)
484 {
485   UNDO_NESTED_ASSERT(false);
486   /* We could detect and clean this up (but it should never happen!). */
487   BLI_assert(ustack->step_init == NULL);
488   const UndoType *ut = BKE_undosys_type_from_context(C);
489   if (ut == NULL) {
490     return NULL;
491   }
492   return BKE_undosys_step_push_init_with_type(ustack, C, name, ut);
493 }
494 
/**
 * Encode and push a new undo step of type \a ut, making it the active step.
 * Any redo branch (steps after the active step) is freed first.
 *
 * \param C: Can be NULL from some callers if their encoding function doesn't need it
 * \return true when a step was pushed (encoding may legitimately produce none).
 */
bool BKE_undosys_step_push_with_type(UndoStack *ustack,
                                     bContext *C,
                                     const char *name,
                                     const UndoType *ut)
{
  UNDO_NESTED_ASSERT(false);
  undosys_stack_validate(ustack, false);
  bool is_not_empty = ustack->step_active != NULL;

  /* Might not be final place for this to be called - probably only want to call it from some
   * undo handlers, not all of them? */
  BKE_lib_override_library_main_operations_create(G_MAIN, false);

  /* Remove all undos after (also when 'ustack->step_active == NULL'). */
  while (ustack->steps.last != ustack->step_active) {
    UndoStep *us_iter = ustack->steps.last;
    undosys_step_free_and_unlink(ustack, us_iter);
    undosys_stack_validate(ustack, is_not_empty);
  }

  if (ustack->step_active) {
    BLI_assert(BLI_findindex(&ustack->steps, ustack->step_active) != -1);
  }

#ifdef WITH_GLOBAL_UNDO_ENSURE_UPDATED
  /* Steps that reference ID's need a memfile step to resolve against:
   * push a hidden one first when the main database hasn't been written yet. */
  if (ut->step_foreach_ID_ref != NULL) {
    if (G_MAIN->is_memfile_undo_written == false) {
      const char *name_internal = "MemFile Internal (pre)";
      /* Don't let 'step_init' cause issues when adding memfile undo step. */
      void *step_init = ustack->step_init;
      ustack->step_init = NULL;
      const bool ok = undosys_stack_push_main(ustack, name_internal, G_MAIN);
      /* Restore 'step_init'. */
      ustack->step_init = step_init;
      if (ok) {
        UndoStep *us = ustack->steps.last;
        BLI_assert(STREQ(us->name, name_internal));
        /* Hidden from the user: an implementation detail, not a real undo state. */
        us->skip = true;
#  ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER
        ustack->step_active_memfile = us;
#  endif
      }
    }
  }
#endif

  bool use_memfile_step = false;
  {
    /* Reuse a pending delayed-encode step when present, else allocate fresh. */
    UndoStep *us = ustack->step_init ? ustack->step_init : MEM_callocN(ut->step_size, __func__);
    ustack->step_init = NULL;
    if (us->name[0] == '\0') {
      BLI_strncpy(us->name, name, sizeof(us->name));
    }
    us->type = ut;
    /* Initialized, not added yet. */

    CLOG_INFO(&LOG, 1, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);

    if (!undosys_step_encode(C, G_MAIN, ustack, us)) {
      /* Encoding produced no step: discard, the stack is unchanged. */
      MEM_freeN(us);
      undosys_stack_validate(ustack, true);
      return false;
    }
    ustack->step_active = us;
    BLI_addtail(&ustack->steps, us);
    use_memfile_step = us->use_memfile_step;
  }

  if (use_memfile_step) {
    /* Make this the user visible undo state, so redo always applies
     * on top of the mem-file undo instead of skipping it. see: T67256. */
    UndoStep *us_prev = ustack->step_active;
    const char *name_internal = us_prev->name;
    const bool ok = undosys_stack_push_main(ustack, name_internal, G_MAIN);
    if (ok) {
      UndoStep *us = ustack->steps.last;
      BLI_assert(STREQ(us->name, name_internal));
      us_prev->skip = true;
#ifdef WITH_GLOBAL_UNDO_CORRECT_ORDER
      ustack->step_active_memfile = us;
#endif
      ustack->step_active = us;
    }
  }

  if (ustack->group_level > 0) {
    /* Temporarily set skip for the active step.
     * This is an invalid state which must be corrected once the last group ends. */
    ustack->step_active->skip = true;
  }

  undosys_stack_validate(ustack, true);
  return true;
}
592 
BKE_undosys_step_push(UndoStack * ustack,bContext * C,const char * name)593 bool BKE_undosys_step_push(UndoStack *ustack, bContext *C, const char *name)
594 {
595   UNDO_NESTED_ASSERT(false);
596   const UndoType *ut = ustack->step_init ? ustack->step_init->type :
597                                            BKE_undosys_type_from_context(C);
598   if (ut == NULL) {
599     return false;
600   }
601   return BKE_undosys_step_push_with_type(ustack, C, name, ut);
602 }
603 
604 /**
605  * Useful when we want to diff against previous undo data but can't be sure the types match.
606  */
BKE_undosys_step_same_type_next(UndoStep * us)607 UndoStep *BKE_undosys_step_same_type_next(UndoStep *us)
608 {
609   if (us) {
610     const UndoType *ut = us->type;
611     while ((us = us->next)) {
612       if (us->type == ut) {
613         return us;
614       }
615     }
616   }
617   return us;
618 }
619 
620 /**
621  * Useful when we want to diff against previous undo data but can't be sure the types match.
622  */
BKE_undosys_step_same_type_prev(UndoStep * us)623 UndoStep *BKE_undosys_step_same_type_prev(UndoStep *us)
624 {
625   if (us) {
626     const UndoType *ut = us->type;
627     while ((us = us->prev)) {
628       if (us->type == ut) {
629         return us;
630       }
631     }
632   }
633   return us;
634 }
635 
BKE_undosys_step_find_by_name_with_type(UndoStack * ustack,const char * name,const UndoType * ut)636 UndoStep *BKE_undosys_step_find_by_name_with_type(UndoStack *ustack,
637                                                   const char *name,
638                                                   const UndoType *ut)
639 {
640   for (UndoStep *us = ustack->steps.last; us; us = us->prev) {
641     if (us->type == ut) {
642       if (STREQ(name, us->name)) {
643         return us;
644       }
645     }
646   }
647   return NULL;
648 }
649 
/** Find the newest step matching \a name (any type), or NULL. */
UndoStep *BKE_undosys_step_find_by_name(UndoStack *ustack, const char *name)
{
  return BLI_rfindstring(&ustack->steps, name, offsetof(UndoStep, name));
}
654 
BKE_undosys_step_find_by_type(UndoStack * ustack,const UndoType * ut)655 UndoStep *BKE_undosys_step_find_by_type(UndoStack *ustack, const UndoType *ut)
656 {
657   for (UndoStep *us = ustack->steps.last; us; us = us->prev) {
658     if (us->type == ut) {
659       return us;
660     }
661   }
662   return NULL;
663 }
664 
/**
 * Undo so the step before \a us becomes the active state.
 *
 * \param us: The step assumed currently applied; the state loaded is 'us->prev'.
 * \param use_skip: When true, continue stepping back past steps flagged 'skip'
 * so the final state is a user-visible step.
 * \return true when a state was loaded.
 */
bool BKE_undosys_step_undo_with_data_ex(UndoStack *ustack,
                                        bContext *C,
                                        UndoStep *us,
                                        bool use_skip)
{
  UNDO_NESTED_ASSERT(false);
  if (us) {
    undosys_stack_validate(ustack, true);
  }
  UndoStep *us_prev = us ? us->prev : NULL;
  if (us) {
    /* The current state is a copy, we need to load the previous state. */
    us = us_prev;
  }

  /* This will be active once complete. */
  UndoStep *us_active = us_prev;
  if (use_skip) {
    while (us_active && us_active->skip) {
      us_active = us_active->prev;
    }
  }

  if ((us != NULL) && (us_active != NULL)) {
    CLOG_INFO(&LOG, 1, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);

    /* Handle accumulate steps. */
    if (ustack->step_active) {
      /* Decode every step between the active step and the target (exclusive),
       * so accumulating step types observe each intermediate state. */
      UndoStep *us_iter = ustack->step_active;
      while (us_iter != us) {
        /* TODO:
         * - skip successive steps that store the same data, eg: memfile steps.
         * - or steps that include another steps data, eg: a memfile step includes text undo data.
         */
        undosys_step_decode(C, G_MAIN, ustack, us_iter, -1, false);

        us_iter = us_iter->prev;
      }
    }

    {
      /* Decode from 'us_prev' back to 'us_active' (the first non-skipped step),
       * updating the active step as we go. */
      UndoStep *us_iter = us_prev;
      do {
        const bool is_final = (us_iter == us_active);
        if (is_final == false) {
          CLOG_INFO(&LOG,
                    2,
                    "undo continue with skip %p '%s', type='%s'",
                    us_iter,
                    us_iter->name,
                    us_iter->type->name);
        }
        undosys_step_decode(C, G_MAIN, ustack, us_iter, -1, is_final);
        ustack->step_active = us_iter;
      } while ((us_active != us_iter) && (us_iter = us_iter->prev));
    }

    return true;
  }
  return false;
}
/** Undo from \a us with default skip handling (final state is user-visible). */
bool BKE_undosys_step_undo_with_data(UndoStack *ustack, bContext *C, UndoStep *us)
{
  return BKE_undosys_step_undo_with_data_ex(ustack, C, us, true);
}
730 
/** Undo one step from the currently active step. \return true on success. */
bool BKE_undosys_step_undo(UndoStack *ustack, bContext *C)
{
  return BKE_undosys_step_undo_with_data(ustack, C, ustack->step_active);
}
735 
/** Load the undo state at \a index (0-based position in the stack). */
void BKE_undosys_step_undo_from_index(UndoStack *ustack, bContext *C, int index)
{
  UndoStep *us = BLI_findlink(&ustack->steps, index);
  /* BLI_findlink returns NULL for out-of-range indices; the previous code
   * dereferenced the result unconditionally. */
  if (us == NULL) {
    BLI_assert(0);
    return;
  }
  BLI_assert(us->skip == false);
  BKE_undosys_step_load_data(ustack, C, us);
}
742 
/**
 * Redo so the step after \a us becomes the active state.
 *
 * \param us: The step assumed currently applied; the state loaded is 'us->next'.
 * \param use_skip: When true, continue stepping forward past steps flagged 'skip'
 * so the final state is a user-visible step.
 * \return true when a state was loaded.
 */
bool BKE_undosys_step_redo_with_data_ex(UndoStack *ustack,
                                        bContext *C,
                                        UndoStep *us,
                                        bool use_skip)
{
  UNDO_NESTED_ASSERT(false);
  UndoStep *us_next = us ? us->next : NULL;
  /* Unlike undo accumulate, we always use the next. */
  us = us_next;

  /* This will be active once complete. */
  UndoStep *us_active = us_next;
  if (use_skip) {
    while (us_active && us_active->skip) {
      us_active = us_active->next;
    }
  }

  if ((us != NULL) && (us_active != NULL)) {
    CLOG_INFO(&LOG, 1, "addr=%p, name='%s', type='%s'", us, us->name, us->type->name);

    /* Handle accumulate steps. */
    if (ustack->step_active && ustack->step_active->next) {
      /* Decode every step between the active step and the target (exclusive),
       * so accumulating step types observe each intermediate state. */
      UndoStep *us_iter = ustack->step_active->next;
      while (us_iter != us) {
        undosys_step_decode(C, G_MAIN, ustack, us_iter, 1, false);
        us_iter = us_iter->next;
      }
    }

    {
      /* Decode from 'us_next' forward to 'us_active' (the first non-skipped step),
       * updating the active step as we go. */
      UndoStep *us_iter = us_next;
      do {
        const bool is_final = (us_iter == us_active);
        if (is_final == false) {
          CLOG_INFO(&LOG,
                    2,
                    "redo continue with skip %p '%s', type='%s'",
                    us_iter,
                    us_iter->name,
                    us_iter->type->name);
        }
        undosys_step_decode(C, G_MAIN, ustack, us_iter, 1, is_final);
        ustack->step_active = us_iter;
      } while ((us_active != us_iter) && (us_iter = us_iter->next));
    }
    return true;
  }
  return false;
}
/** Redo from \a us with default skip handling (final state is user-visible). */
bool BKE_undosys_step_redo_with_data(UndoStack *ustack, bContext *C, UndoStep *us)
{
  return BKE_undosys_step_redo_with_data_ex(ustack, C, us, true);
}
797 
/** Redo one step from the currently active step. \return true on success. */
bool BKE_undosys_step_redo(UndoStack *ustack, bContext *C)
{
  return BKE_undosys_step_redo_with_data(ustack, C, ustack->step_active);
}
802 
BKE_undosys_step_load_data(UndoStack * ustack,bContext * C,UndoStep * us)803 bool BKE_undosys_step_load_data(UndoStack *ustack, bContext *C, UndoStep *us)
804 {
805   UNDO_NESTED_ASSERT(false);
806   const int index_active = BLI_findindex(&ustack->steps, ustack->step_active);
807   const int index_target = BLI_findindex(&ustack->steps, us);
808   BLI_assert(!ELEM(-1, index_active, index_target));
809   bool ok = true;
810 
811   if (index_target < index_active) {
812     uint i = index_active - index_target;
813     while (i-- && ok) {
814       ok = BKE_undosys_step_undo_with_data_ex(ustack, C, ustack->step_active, false);
815     }
816   }
817   else if (index_target > index_active) {
818     uint i = index_target - index_active;
819     while (i-- && ok) {
820       ok = BKE_undosys_step_redo_with_data_ex(ustack, C, ustack->step_active, false);
821     }
822   }
823 
824   if (ok) {
825     BLI_assert(ustack->step_active == us);
826   }
827 
828   return ok;
829 }
830 
831 /**
832  * Similar to #WM_operatortype_append
833  */
BKE_undosys_type_append(void (* undosys_fn)(UndoType *))834 UndoType *BKE_undosys_type_append(void (*undosys_fn)(UndoType *))
835 {
836   UndoType *ut;
837 
838   ut = MEM_callocN(sizeof(UndoType), __func__);
839 
840   undosys_fn(ut);
841 
842   BLI_addtail(&g_undo_types, ut);
843 
844   return ut;
845 }
846 
BKE_undosys_type_free_all(void)847 void BKE_undosys_type_free_all(void)
848 {
849   UndoType *ut;
850   while ((ut = BLI_pophead(&g_undo_types))) {
851     MEM_freeN(ut);
852   }
853 }
854 
855 /** \} */
856 
857 /* -------------------------------------------------------------------- */
858 /** \name Undo Stack Grouping
859  *
860  * This enables skip while group-level is set.
861  * In general it's not allowed that #UndoStack.step_active have 'skip' enabled.
862  *
863  * This rule is relaxed for grouping, however it's important each call to
864  * #BKE_undosys_stack_group_begin has a matching #BKE_undosys_stack_group_end.
865  *
866  * - Levels are used so nesting is supported, where the last call to #BKE_undosys_stack_group_end
867  *   will set the active undo step that should not be skipped.
868  *
869  * - Correct begin/end is checked by an assert since any errors here will cause undo
870  *   to consider all steps part of one large group.
871  *
872  * - Calls to begin/end with no undo steps being pushed is supported and does nothing.
873  *
874  * \{ */
875 
/** Enter a grouping level; must be paired with #BKE_undosys_stack_group_end. */
void BKE_undosys_stack_group_begin(UndoStack *ustack)
{
  BLI_assert(ustack->group_level >= 0);
  ustack->group_level++;
}
881 
/** Leave a grouping level; when the last level ends, un-skip the active step. */
void BKE_undosys_stack_group_end(UndoStack *ustack)
{
  ustack->group_level -= 1;
  BLI_assert(ustack->group_level >= 0);

  if (ustack->group_level != 0) {
    return;
  }
  if (LIKELY(ustack->step_active != NULL)) {
    /* The last group ended: the active step must be user-visible again. */
    ustack->step_active->skip = false;
  }
}
893 
894 /** \} */
895 
896 /* -------------------------------------------------------------------- */
897 /** \name ID Reference Utilities
898  *
899  * Unfortunately we need this for a handful of places.
900  */
901 
/**
 * Visit every ID reference stored in every undo step of \a ustack.
 * Currently unused (hence the #UNUSED_FUNCTION wrapper), kept as a utility.
 */
static void UNUSED_FUNCTION(BKE_undosys_foreach_ID_ref(UndoStack *ustack,
                                                       UndoTypeForEachIDRefFn foreach_ID_ref_fn,
                                                       void *user_data))
{
  LISTBASE_FOREACH (UndoStep *, us, &ustack->steps) {
    const UndoType *ut = us->type;
    if (ut->step_foreach_ID_ref != NULL) {
      ut->step_foreach_ID_ref(us, foreach_ID_ref_fn, user_data);
    }
  }
}
913 
914 /** \} */
915 
916 /* -------------------------------------------------------------------- */
917 /** \name Debug Helpers
918  * \{ */
919 
/** Debug helper: print every step with its status flags to stdout. */
void BKE_undosys_print(UndoStack *ustack)
{
  printf("Undo %d Steps (*: active, #=applied, M=memfile-active, S=skip)\n",
         BLI_listbase_count(&ustack->steps));
  int index = 0;
  for (UndoStep *us = ustack->steps.first; us; us = us->next, index++) {
    printf("[%c%c%c%c] %3d {%p} type='%s', name='%s'\n",
           (us == ustack->step_active) ? '*' : ' ',
           us->is_applied ? '#' : ' ',
           (us == ustack->step_active_memfile) ? 'M' : ' ',
           us->skip ? 'S' : ' ',
           index,
           (void *)us,
           us->type->name,
           us->name);
  }
}
938 
939 /** \} */
940