/* SPDX-License-Identifier: GPL-2.0-or-later
 * Copyright 2004 Blender Foundation. All rights reserved. */

/** \file
 * \ingroup blenloader
 */
#include <errno.h>
#include <fcntl.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* open/close */
#ifndef _WIN32
# include <unistd.h>
#else
# include <io.h>
#endif
#include "MEM_guardedalloc.h"
#include "DNA_listBase.h"
#include "BLI_blenlib.h"
#include "BLI_ghash.h"
#include "BLO_readfile.h"
#include "BLO_undofile.h"
#include "BKE_lib_id.h"
#include "BKE_main.h"
#include "BKE_undo_system.h"
/* keep last */
#include "BLI_strict_flags.h"
/* **************** support for memory-write, for undo buffers *************** */
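
/* Free all chunks of this memfile. Chunks flagged `is_identical` share their buffer with a
 * chunk of a previous undo step and are not owned here, so only owned buffers are freed. */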
void BLO_memfile_free(MemFile *memfile)
{
  MemFileChunk *chunk;

  while ((chunk = BLI_pophead(&memfile->chunks))) {
    if (chunk->is_identical == false) {
      MEM_freeN((void *)chunk->buf);
    }
    MEM_freeN(chunk);
  }
  memfile->size = 0;
}
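
/* Merge `first` (the memfile being discarded) into `second`: ownership of any chunk buffer
 * that `second` still references is transferred to it, then `first` is freed. */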
void BLO_memfile_merge(MemFile *first, MemFile *second)
{
  /* We use this mapping to store the memory buffers from second memfile chunks which are not
   * owned by it (i.e. shared with some previous memory steps). */
  GHash *buffer_to_second_memchunk = BLI_ghash_new(
      BLI_ghashutil_ptrhash, BLI_ghashutil_ptrcmp, __func__);

  /* First, detect all memchunks in second memfile that are not owned by it. */
  for (MemFileChunk *sc = second->chunks.first; sc != NULL; sc = sc->next) {
    if (sc->is_identical) {
      BLI_ghash_insert(buffer_to_second_memchunk, (void *)sc->buf, sc);
    }
  }

  /* Now, check all chunks from first memfile (the one we are removing), and if a memchunk owned
   * by it is also used by the second memfile, transfer the ownership. */
  for (MemFileChunk *fc = first->chunks.first; fc != NULL; fc = fc->next) {
    if (!fc->is_identical) {
      MemFileChunk *sc = BLI_ghash_lookup(buffer_to_second_memchunk, fc->buf);
      if (sc != NULL) {
        BLI_assert(sc->is_identical);
        sc->is_identical = false;
        fc->is_identical = true;
      }
      /* Note that if the second memfile does not use that chunk, we assume that the first one
       * fully owns it without sharing it with any other memfile, and hence it should be freed
       * with it. */
    }
  }

  BLI_ghash_free(buffer_to_second_memchunk, NULL, NULL);
  BLO_memfile_free(first);
}
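
/* Clear the `is_identical_future` flag on all chunks. The flag marks a chunk as identical to
 * the matching chunk of the next (future) undo step, so it becomes meaningless once that
 * future step no longer exists. */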
void BLO_memfile_clear_future(MemFile *memfile)
{
  LISTBASE_FOREACH (MemFileChunk *, chunk, &memfile->chunks) {
    chunk->is_identical_future = false;
  }
}
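
/* Prepare `mem_data` for writing a new undo step into `written_memfile`, optionally using
 * `reference_memfile` (the previous step) to de-duplicate unchanged chunks. Callers are
 * expected to follow this with a series of BLO_memfile_chunk_add() calls and a final
 * BLO_memfile_write_finalize(). */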
void BLO_memfile_write_init(MemFileWriteData *mem_data,
                            MemFile *written_memfile,
                            MemFile *reference_memfile)
{
  mem_data->written_memfile = written_memfile;
  mem_data->reference_memfile = reference_memfile;
  mem_data->reference_current_chunk = reference_memfile ? reference_memfile->chunks.first : NULL;

  /* If we have a reference memfile, we generate a mapping between the session_uuid's of the
   * IDs stored in that previous undo step, and its first matching memchunk. This will allow
   * us to easily find the existing undo memory storage of IDs even when some re-ordering in
   * current Main data-base broke the order matching with the memchunks from previous step.
   */
  if (reference_memfile != NULL) {
    mem_data->id_session_uuid_mapping = BLI_ghash_new(
        BLI_ghashutil_inthash_p_simple, BLI_ghashutil_intcmp, __func__);
    uint current_session_uuid = MAIN_ID_SESSION_UUID_UNSET;
    LISTBASE_FOREACH (MemFileChunk *, mem_chunk, &reference_memfile->chunks) {
      if (!ELEM(mem_chunk->id_session_uuid, MAIN_ID_SESSION_UUID_UNSET, current_session_uuid)) {
        current_session_uuid = mem_chunk->id_session_uuid;
        void **entry;
        if (!BLI_ghash_ensure_p(mem_data->id_session_uuid_mapping,
                                POINTER_FROM_UINT(current_session_uuid),
                                &entry)) {
          *entry = mem_chunk;
        }
        else {
          BLI_assert(0);
        }
      }
    }
  }
}
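
/* Release the temporary data created by BLO_memfile_write_init() (currently only the
 * ID session_uuid to memchunk mapping). */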
void BLO_memfile_write_finalize(MemFileWriteData *mem_data)
{
  if (mem_data->id_session_uuid_mapping != NULL) {
    BLI_ghash_free(mem_data->id_session_uuid_mapping, NULL, NULL);
  }
}
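
/* Append a chunk of `size` bytes to the written memfile. If the chunk at the current position
 * in the reference memfile holds identical data of the same size, its buffer is shared instead
 * of copied, and the new chunk is flagged `is_identical`. */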
void BLO_memfile_chunk_add(MemFileWriteData *mem_data, const char *buf, size_t size)
{
  MemFile *memfile = mem_data->written_memfile;
  MemFileChunk **compchunk_step = &mem_data->reference_current_chunk;

  MemFileChunk *curchunk = MEM_mallocN(sizeof(MemFileChunk), "MemFileChunk");
  curchunk->size = size;
  curchunk->buf = NULL;
  curchunk->is_identical = false;
  /* This is unsafe in the sense that an app handler or other code that does not
   * perform an undo push may make changes after the last undo push that
   * will then not be undone. Though it's not entirely clear that is wrong behavior. */
  curchunk->is_identical_future = true;
  curchunk->id_session_uuid = mem_data->current_id_session_uuid;
  BLI_addtail(&memfile->chunks, curchunk);

  /* We compare compchunk with buf. */
  if (*compchunk_step != NULL) {
    MemFileChunk *compchunk = *compchunk_step;
    if (compchunk->size == curchunk->size) {
      if (memcmp(compchunk->buf, buf, size) == 0) {
        curchunk->buf = compchunk->buf;
        curchunk->is_identical = true;
        compchunk->is_identical_future = true;
      }
    }
    *compchunk_step = compchunk->next;
  }

  /* Not equal: allocate and copy the data for this chunk. */
  if (curchunk->buf == NULL) {
    char *buf_new = MEM_mallocN(size, "Chunk buffer");
    memcpy(buf_new, buf, size);
    curchunk->buf = buf_new;
    memfile->size += size;
  }
}
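
/* Rebuild a Main database from the undo memfile by reading it back with the regular .blend
 * reading code. Returns NULL on failure; on success the caller takes ownership of the returned
 * Main, and `r_scene` (if given) is set to its current scene. */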
struct Main *BLO_memfile_main_get(struct MemFile *memfile,
                                  struct Main *bmain,
                                  struct Scene **r_scene)
{
  struct Main *bmain_undo = NULL;
  BlendFileData *bfd = BLO_read_from_memfile(bmain,
                                             BKE_main_blendfile_path(bmain),
                                             memfile,
                                             &(const struct BlendFileReadParams){0},
                                             NULL);

  if (bfd) {
    bmain_undo = bfd->main;
    if (r_scene) {
      *r_scene = bfd->curscene;
    }

    MEM_freeN(bfd);
  }

  return bmain_undo;
}
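
/* Write the raw chunks of this memfile to `filepath` on disk (used for autosave and
 * 'quit.blend'). Returns true on success, false if the file could not be opened or written. */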
bool BLO_memfile_write_file(struct MemFile *memfile, const char *filepath)
{
  MemFileChunk *chunk;
  int file, oflags;

  /* NOTE: This is currently used for autosave and 'quit.blend',
   * where _not_ following symlinks is OK,
   * however if this is ever executed explicitly by the user,
   * we may want to allow writing to symlinks. */

  oflags = O_BINARY | O_WRONLY | O_CREAT | O_TRUNC;
#ifdef O_NOFOLLOW
  /* Use O_NOFOLLOW to avoid writing to a symlink - use 'O_EXCL' (CVE-2008-1103). */
  oflags |= O_NOFOLLOW;
#else
  /* TODO(sergey): How to deal with symlinks on windows? */
#  ifndef _MSC_VER
#    warning "Symbolic links will be followed on undo save, possibly causing CVE-2008-1103"
#  endif
#endif

  file = BLI_open(filepath, oflags, 0666);

  if (file == -1) {
    fprintf(stderr,
            "Unable to save '%s': %s\n",
            filepath,
            errno ? strerror(errno) : "Unknown error opening file");
    return false;
  }

  for (chunk = memfile->chunks.first; chunk; chunk = chunk->next) {
#ifdef _WIN32
    if ((size_t)write(file, chunk->buf, (uint)chunk->size) != chunk->size)
#else
    if ((size_t)write(file, chunk->buf, chunk->size) != chunk->size)
#endif
    {
      break;
    }
  }

  close(file);

  if (chunk) {
    fprintf(stderr,
            "Unable to save '%s': %s\n",
            filepath,
            errno ? strerror(errno) : "Unknown error writing file");
    return false;
  }

  return true;
}
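
/* FileReader read callback: copy `size` bytes starting at the reader's current offset into
 * `buffer`, walking the chunk list and crossing chunk boundaries as needed. Also accumulates
 * `memchunk_identical`, which stays true only while every chunk touched by the read is
 * unchanged relative to the undo step we are coming from. */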
static ssize_t undo_read(FileReader *reader, void *buffer, size_t size)
{
  UndoReader *undo = (UndoReader *)reader;

  static size_t seek = SIZE_MAX; /* The current position. */
  static size_t offset = 0;      /* Size of previous chunks. */
  static MemFileChunk *chunk = NULL;
  size_t chunkoffset, readsize, totread;

  undo->memchunk_identical = true;

  if (size == 0) {
    return 0;
  }

  if (seek != (size_t)undo->reader.offset) {
    chunk = undo->memfile->chunks.first;
    seek = 0;

    while (chunk) {
      if (seek + chunk->size > (size_t)undo->reader.offset) {
        break;
      }
      seek += chunk->size;
      chunk = chunk->next;
    }
    offset = seek;
    seek = (size_t)undo->reader.offset;
  }

  if (chunk) {
    totread = 0;

    do {
      /* First check if we are at the end of the current chunk. */
      if (seek - offset == chunk->size) {
        offset += chunk->size;
        chunk = chunk->next;
      }

      /* Debug, should never happen. */
      if (chunk == NULL) {
        printf("illegal read, chunk zero\n");
        return 0;
      }

      chunkoffset = seek - offset;
      readsize = size - totread;

      /* Data can be spread over multiple chunks, so clamp size
       * to within this chunk, and then it will read further in
       * the next chunk. */
      if (chunkoffset + readsize > chunk->size) {
        readsize = chunk->size - chunkoffset;
      }

      memcpy(POINTER_OFFSET(buffer, totread), chunk->buf + chunkoffset, readsize);
      totread += readsize;
      undo->reader.offset += (off64_t)readsize;
      seek += readsize;

      /* `is_identical` of current chunk represents whether it changed compared to the previous
       * undo step. This is fine in the redo case, but not in the undo case, where we need an
       * extra flag defined when saving the next (future) step after the one we want to restore,
       * as we are supposed to 'come from' that future undo step, and not the one before the
       * current one. */
      undo->memchunk_identical &= undo->undo_direction == STEP_REDO ? chunk->is_identical :
                                                                      chunk->is_identical_future;
    } while (totread < size);

    return (ssize_t)totread;
  }

  return 0;
}
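
/* FileReader close callback: the memfile is owned elsewhere, only the reader itself is freed. */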
static void undo_close(FileReader *reader)
{
  MEM_freeN(reader);
}
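
/* Create a FileReader that reads the undo step directly from the chunks of `memfile`, so it can
 * be decoded by the regular .blend reading code. `undo_direction` controls which 'identical'
 * flag undo_read() uses to track unchanged chunks (see the comment in undo_read()). */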
FileReader *BLO_memfile_new_filereader(MemFile *memfile, int undo_direction)
{
  UndoReader *undo = MEM_callocN(sizeof(UndoReader), __func__);

  undo->memfile = memfile;
  undo->undo_direction = undo_direction;

  undo->reader.read = undo_read;
  undo->reader.seek = NULL;
  undo->reader.close = undo_close;

  return (FileReader *)undo;
}