/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#pragma once

/** \file
 * \ingroup bke
 */

#include "DNA_ID.h"
#include "DNA_listBase.h"

#ifdef __cplusplus
extern "C" {
#endif

struct Main;
struct UndoStep;
struct bContext;

/* IDs. */
struct Mesh;
struct Object;
struct Scene;
struct Text;

typedef struct UndoRefID {
  struct ID *ptr;
  char name[MAX_ID_NAME];
} UndoRefID;
/* UndoRefID_Mesh & friends. */
#define UNDO_REF_ID_TYPE(ptr_ty) \
  typedef struct UndoRefID_##ptr_ty { \
    struct ptr_ty *ptr; \
    char name[MAX_ID_NAME]; \
  } UndoRefID_##ptr_ty
UNDO_REF_ID_TYPE(Mesh);
UNDO_REF_ID_TYPE(Object);
UNDO_REF_ID_TYPE(Scene);
UNDO_REF_ID_TYPE(Text);
UNDO_REF_ID_TYPE(Image);
UNDO_REF_ID_TYPE(PaintCurve);
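
/**
 * The typed variants generated above share the layout of #UndoRefID (an ID pointer followed by
 * its name), so generic code such as an #UndoTypeForEachIDRefFn callback can treat any of them as
 * a plain reference. A minimal illustrative sketch (the variable names are made up, not part of
 * this API):
 *
 * \code{.c}
 * UndoRefID_Object object_ref = {NULL, ""};
 * object_ref.ptr = some_object;  // 'some_object' is a placeholder 'struct Object *'.
 *
 * // Generic undo-system code sees every typed reference as a plain UndoRefID.
 * UndoRefID *generic_ref = (UndoRefID *)&object_ref;
 * \endcode
 */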

typedef struct UndoStack {
  ListBase steps;
  struct UndoStep *step_active;
  /**
   * The last memfile state read, used so we can be sure the names from the
   * library state match the state an undo step was written in.
   */
  struct UndoStep *step_active_memfile;

  /**
   * Some undo systems require begin/end, see: #UndoType.step_encode_init
   *
   * \note This is not included in the 'steps' list;
   * it is only added there once end is called.
   */
  struct UndoStep *step_init;

  /**
   * Keep track of nested group begin/end calls,
   * within which all but the last undo-step is marked for skipping.
   */
  int group_level;
} UndoStack;

typedef struct UndoStep {
  struct UndoStep *next, *prev;
  char name[64];
  const struct UndoType *type;
  /** Size in bytes of all data in step (not including the step). */
  size_t data_size;
  /** Users should never see this step (only used for internal consistency). */
  bool skip;
  /** Some situations require the global state to be stored; edge cases when exiting modes. */
  bool use_memfile_step;
  /**
   * When this is true, undo/memfile read code is allowed to re-use old data-blocks for unchanged
   * IDs and existing depsgraphs. This has to be forbidden in some cases (like renamed IDs).
   */
  bool use_old_bmain_data;
  /** For use by undo systems that accumulate changes (text editor, painting). */
  bool is_applied;
  /* Over-allocated to 'type->step_size'. */
} UndoStep;
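
/**
 * Example: concrete undo-step types extend #UndoStep by declaring it as their first member; the
 * generic system then allocates #UndoType.step_size bytes so the type-specific members live in
 * the same allocation. A minimal sketch (`TextUndoStepExample` and its members are hypothetical
 * names; the real step structs live in the editors' undo implementations):
 *
 * \code{.c}
 * typedef struct TextUndoStepExample {
 *   UndoStep step;             // Must come first so the pointer can be cast both ways.
 *   UndoRefID_Text text_ref;   // Typed ID reference, re-resolved by name on decode.
 *   size_t undo_size;          // Type-specific payload.
 * } TextUndoStepExample;
 * \endcode
 */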

typedef enum eUndoStepDir {
  STEP_REDO = 1,
  STEP_UNDO = -1,
  STEP_INVALID = 0,
} eUndoStepDir;

typedef enum UndoPushReturn {
  UNDO_PUSH_RET_FAILURE = 0,
  UNDO_PUSH_RET_SUCCESS = (1 << 0),
  UNDO_PUSH_RET_OVERRIDE_CHANGED = (1 << 1),
} UndoPushReturn;

typedef void (*UndoTypeForEachIDRefFn)(void *user_data, struct UndoRefID *id_ref);

typedef struct UndoType {
  struct UndoType *next, *prev;
  /** Only for debugging. */
  const char *name;

  /**
   * When NULL, we don't consider this undo type for context checks.
   * Operators must explicitly set the undo type and handle adding the undo step.
   * This is needed when tools operate on data which isn't the primary mode
   * (e.g. paint-curve in sculpt mode).
   */
  bool (*poll)(struct bContext *C);

  /**
   * None of these callbacks manage list add/removal.
   *
   * Note that 'step_encode_init' is optional:
   * some undo types need to perform operations before the undo push finishes.
   */
  void (*step_encode_init)(struct bContext *C, UndoStep *us);

  bool (*step_encode)(struct bContext *C, struct Main *bmain, UndoStep *us);
  void (*step_decode)(
      struct bContext *C, struct Main *bmain, UndoStep *us, const eUndoStepDir dir, bool is_final);

  /**
   * \note When freeing all steps,
   * free from the last since #BKE_UNDOSYS_TYPE_MEMFILE
   * will merge with the next undo type in the list.
   */
  void (*step_free)(UndoStep *us);

  void (*step_foreach_ID_ref)(UndoStep *us,
                              UndoTypeForEachIDRefFn foreach_ID_ref_fn,
                              void *user_data);

  /** Information for the generic undo system to refine handling of this specific undo type. */
  uint flags;

  /**
   * The size of the undo struct 'inherited' from #UndoStep for that specific type.
   * Used for generic allocation in BKE's `undo_system.c`.
   */
  size_t step_size;
} UndoType;
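
/**
 * Example of how an undo type is typically defined and registered (a sketch only: the step
 * struct, the callbacks and the `my_*` / `MY_*` names are hypothetical and assumed to be defined
 * elsewhere in the same file; real undo types are defined in the editors and registered through
 * #BKE_undosys_type_append, declared below):
 *
 * \code{.c}
 * static void my_undosys_type_fn(UndoType *ut)
 * {
 *   ut->name = "My Example";
 *   ut->poll = my_undosys_poll;                  // NULL opts out of context checks.
 *   ut->step_encode = my_undosys_step_encode;    // Store the current state into the step.
 *   ut->step_decode = my_undosys_step_decode;    // Restore the state from the step.
 *   ut->step_free = my_undosys_step_free;        // Free only the step's own data.
 *   ut->step_foreach_ID_ref = my_undosys_foreach_ID_ref;
 *   ut->flags = UNDOTYPE_FLAG_NEED_CONTEXT_FOR_ENCODE;  // Only if 'step_encode' requires C.
 *   ut->step_size = sizeof(MyUndoStepExample);   // Allows over-allocation of #UndoStep.
 * }
 *
 * void my_undosys_register(void)
 * {
 *   MY_UNDOSYS_TYPE = BKE_undosys_type_append(my_undosys_type_fn);
 * }
 * \endcode
 */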

/** #UndoType.flags bitflags. */
typedef enum UndoTypeFlags {
  /**
   * This undo type's `step_encode` callback needs a valid context; it will fail otherwise.
   * \note The callback is still expected to properly deal with a NULL context pointer.
   */
  UNDOTYPE_FLAG_NEED_CONTEXT_FOR_ENCODE = 1 << 0,
} UndoTypeFlags;

/* Expose since we need to perform operations on specific undo types (rarely). */
extern const UndoType *BKE_UNDOSYS_TYPE_IMAGE;
extern const UndoType *BKE_UNDOSYS_TYPE_MEMFILE;
extern const UndoType *BKE_UNDOSYS_TYPE_PAINTCURVE;
extern const UndoType *BKE_UNDOSYS_TYPE_PARTICLE;
extern const UndoType *BKE_UNDOSYS_TYPE_SCULPT;
extern const UndoType *BKE_UNDOSYS_TYPE_TEXT;

#define BKE_UNDOSYS_TYPE_IS_MEMFILE_SKIP(ty) ELEM(ty, BKE_UNDOSYS_TYPE_IMAGE)

UndoStack *BKE_undosys_stack_create(void);
void BKE_undosys_stack_destroy(UndoStack *ustack);
void BKE_undosys_stack_clear(UndoStack *ustack);
void BKE_undosys_stack_clear_active(UndoStack *ustack);
bool BKE_undosys_stack_has_undo(UndoStack *ustack, const char *name);
void BKE_undosys_stack_init_from_main(UndoStack *ustack, struct Main *bmain);
void BKE_undosys_stack_init_from_context(UndoStack *ustack, struct bContext *C);
UndoStep *BKE_undosys_stack_active_with_type(UndoStack *ustack, const UndoType *ut);
UndoStep *BKE_undosys_stack_init_or_active_with_type(UndoStack *ustack, const UndoType *ut);
void BKE_undosys_stack_limit_steps_and_memory(UndoStack *ustack, int steps, size_t memory_limit);
#define BKE_undosys_stack_limit_steps_and_memory_defaults(ustack) \
  BKE_undosys_stack_limit_steps_and_memory(ustack, U.undosteps, (size_t)U.undomemory * 1024 * 1024)

void BKE_undosys_stack_group_begin(UndoStack *ustack);
void BKE_undosys_stack_group_end(UndoStack *ustack);
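
/**
 * Example: several consecutive pushes can be collapsed into one user-visible undo step by
 * wrapping them in a group (nested calls are supported, see #UndoStack.group_level).
 * A sketch, assuming 'ustack', 'C' and the step names are provided by the caller:
 *
 * \code{.c}
 * BKE_undosys_stack_group_begin(ustack);
 * BKE_undosys_step_push(ustack, C, "Add Object");
 * BKE_undosys_step_push(ustack, C, "Move Object");
 * BKE_undosys_stack_group_end(ustack);  // All but the last step are marked for skipping.
 * \endcode
 */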

/* Only some UndoTypes require init. */
UndoStep *BKE_undosys_step_push_init_with_type(UndoStack *ustack,
                                               struct bContext *C,
                                               const char *name,
                                               const UndoType *ut);
UndoStep *BKE_undosys_step_push_init(UndoStack *ustack, struct bContext *C, const char *name);

UndoPushReturn BKE_undosys_step_push_with_type(UndoStack *ustack,
                                               struct bContext *C,
                                               const char *name,
                                               const UndoType *ut);
UndoPushReturn BKE_undosys_step_push(UndoStack *ustack, struct bContext *C, const char *name);
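
/**
 * Example of a common push flow: an optional init before an operation starts (only meaningful for
 * undo types that implement #UndoType.step_encode_init), followed by the actual push once the
 * operation is done. A sketch only, assuming 'ustack' and 'C' are provided by the caller and that
 * the text undo type is the one being used:
 *
 * \code{.c}
 * BKE_undosys_step_push_init_with_type(ustack, C, "Edit Text", BKE_UNDOSYS_TYPE_TEXT);
 * // ... perform the operation that modifies the data ...
 * UndoPushReturn ret = BKE_undosys_step_push_with_type(ustack, C, "Edit Text", BKE_UNDOSYS_TYPE_TEXT);
 * if (ret & UNDO_PUSH_RET_SUCCESS) {
 *   // A new undo step is now on top of the stack.
 * }
 * \endcode
 */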

UndoStep *BKE_undosys_step_find_by_name_with_type(UndoStack *ustack,
                                                  const char *name,
                                                  const UndoType *ut);
UndoStep *BKE_undosys_step_find_by_type(UndoStack *ustack, const UndoType *ut);
UndoStep *BKE_undosys_step_find_by_name(UndoStack *ustack, const char *name);

eUndoStepDir BKE_undosys_step_calc_direction(const UndoStack *ustack,
                                             const UndoStep *us_target,
                                             const UndoStep *us_reference);

bool BKE_undosys_step_load_data_ex(UndoStack *ustack,
                                   struct bContext *C,
                                   UndoStep *us_target,
                                   UndoStep *us_reference,
                                   const bool use_skip);
bool BKE_undosys_step_load_data(UndoStack *ustack, struct bContext *C, UndoStep *us_target);
void BKE_undosys_step_load_from_index(UndoStack *ustack, struct bContext *C, const int index);
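
/**
 * Example: jumping directly to a known step (undoing or redoing as many steps as needed) can be
 * done by passing the target step to the load functions; the direction relative to the active
 * step can be computed with #BKE_undosys_step_calc_direction. A sketch only, assuming 'ustack'
 * and 'C' are provided by the caller and "Move Object" names an existing step:
 *
 * \code{.c}
 * UndoStep *us_target = BKE_undosys_step_find_by_name(ustack, "Move Object");
 * if (us_target != NULL) {
 *   BKE_undosys_step_load_data(ustack, C, us_target);
 * }
 * \endcode
 */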

bool BKE_undosys_step_undo_with_data_ex(UndoStack *ustack,
                                        struct bContext *C,
                                        UndoStep *us,
                                        bool use_skip);
bool BKE_undosys_step_undo_with_data(UndoStack *ustack, struct bContext *C, UndoStep *us_target);
bool BKE_undosys_step_undo(UndoStack *ustack, struct bContext *C);

bool BKE_undosys_step_redo_with_data_ex(UndoStack *ustack,
                                        struct bContext *C,
                                        UndoStep *us,
                                        bool use_skip);
bool BKE_undosys_step_redo_with_data(UndoStack *ustack, struct bContext *C, UndoStep *us_target);
bool BKE_undosys_step_redo(UndoStack *ustack, struct bContext *C);

UndoStep *BKE_undosys_step_same_type_next(UndoStep *us);
UndoStep *BKE_undosys_step_same_type_prev(UndoStep *us);

/* Type System */
UndoType *BKE_undosys_type_append(void (*undosys_fn)(UndoType *));
void BKE_undosys_type_free_all(void);

/* ID Accessor */
#if 0 /* functionality is only used internally for now. */
void BKE_undosys_foreach_ID_ref(UndoStack *ustack,
                                UndoTypeForEachIDRefFn foreach_ID_ref_fn,
                                void *user_data);
#endif

void BKE_undosys_print(UndoStack *ustack);

#ifdef __cplusplus
}
#endif