WIP: Functions: new local allocator for better memory reuse and performance #104630

Draft
Jacques Lucke wants to merge 44 commits from JacquesLucke/blender:local-allocator into main

When changing the target branch, be careful to rebase the branch in your fork to match. See documentation.
34 changed files with 710 additions and 416 deletions
Showing only changes of commit 5d149ee05b - Show all commits

View File

@ -2324,7 +2324,6 @@ class USERPREF_PT_experimental_new_features(ExperimentalPanel, Panel):
({"property": "use_sculpt_tools_tilt"}, "T82877"),
({"property": "use_extended_asset_browser"}, ("project/view/130/", "Project Page")),
({"property": "use_override_templates"}, ("T73318", "Milestone 4")),
({"property": "use_realtime_compositor"}, "T99210"),
),
)

View File

@ -3660,10 +3660,6 @@ class VIEW3D_MT_pose_propagate(Menu):
def draw(self, _context):
layout = self.layout
layout.operator("pose.propagate").mode = 'WHILE_HELD'
layout.separator()
layout.operator("pose.propagate", text="To Next Keyframe").mode = 'NEXT_KEY'
layout.operator("pose.propagate", text="To Last Keyframe (Make Cyclic)").mode = 'LAST_KEY'
@ -6216,8 +6212,7 @@ class VIEW3D_PT_shading_compositor(Panel):
@classmethod
def poll(cls, context):
return (context.space_data.shading.type in {'MATERIAL', 'RENDERED'} and
context.preferences.experimental.use_realtime_compositor)
return context.space_data.shading.type in {'MATERIAL', 'RENDERED'}
def draw(self, context):
shading = context.space_data.shading

View File

@ -410,8 +410,6 @@ bool BKE_lib_override_library_status_check_reference(struct Main *bmain, struct
* \note This is by far the biggest operation (the more time-consuming) of the three so far,
* since it has to go over all properties in depth (all overridable ones at least).
* Generating differential values and applying overrides are much cheaper.
*
* \return true if any library operation was created.
*/
void BKE_lib_override_library_operations_create(struct Main *bmain,
struct ID *local,
@ -425,6 +423,29 @@ void BKE_lib_override_library_main_operations_create(struct Main *bmain,
bool force_auto,
int *r_report_flags);
/**
* Restore forbidden modified override properties to the values of their matching properties in the
* linked reference ID.
*
* \param r_report_flags #eRNAOverrideMatchResult flags giving info about the result of this call.
*
* \note Typically used as part of BKE_lib_override_library_main_operations_create process, since
* modifying RNA properties from non-main threads is not safe.
*/
void BKE_lib_override_library_operations_restore(struct Main *bmain,
struct ID *local,
int *r_report_flags);
/**
* Restore forbidden modified override properties to the values of their matching properties in the
* linked reference ID, for all liboverride IDs tagged as needing such process in given `bmain`.
*
* \param r_report_flags #eRNAOverrideMatchResult flags giving info about the result of this call.
*
* \note Typically used as part of BKE_lib_override_library_main_operations_create process, since
* modifying RNA properties from non-main threads is not safe.
*/
void BKE_lib_override_library_main_operations_restore(struct Main *bmain, int *r_report_flags);
/**
* Reset all overrides in given \a id_root, while preserving ID relations.
*

View File

@ -3297,7 +3297,10 @@ bool BKE_lib_override_library_status_check_reference(Main *bmain, ID *local)
return true;
}
void BKE_lib_override_library_operations_create(Main *bmain, ID *local, int *r_report_flags)
static void lib_override_library_operations_create(Main *bmain,
ID *local,
const eRNAOverrideMatch override_match_flags,
eRNAOverrideMatchResult *r_report_flags)
{
BLI_assert(!ID_IS_LINKED(local));
BLI_assert(local->override_library != nullptr);
@ -3330,19 +3333,24 @@ void BKE_lib_override_library_operations_create(Main *bmain, ID *local, int *r_r
RNA_id_pointer_create(local->override_library->reference, &rnaptr_reference);
eRNAOverrideMatchResult local_report_flags = RNA_OVERRIDE_MATCH_RESULT_INIT;
RNA_struct_override_matches(
bmain,
&rnaptr_local,
&rnaptr_reference,
nullptr,
0,
local->override_library,
(eRNAOverrideMatch)(RNA_OVERRIDE_COMPARE_CREATE | RNA_OVERRIDE_COMPARE_RESTORE),
&local_report_flags);
RNA_struct_override_matches(bmain,
&rnaptr_local,
&rnaptr_reference,
nullptr,
0,
local->override_library,
override_match_flags,
&local_report_flags);
if (local_report_flags & RNA_OVERRIDE_MATCH_RESULT_RESTORED) {
CLOG_INFO(&LOG, 2, "We did restore some properties of %s from its reference", local->name);
}
if (local_report_flags & RNA_OVERRIDE_MATCH_RESULT_RESTORE_TAGGED) {
CLOG_INFO(&LOG,
2,
"We did tag some properties of %s for restoration from its reference",
local->name);
}
if (local_report_flags & RNA_OVERRIDE_MATCH_RESULT_CREATED) {
CLOG_INFO(&LOG, 2, "We did generate library override rules for %s", local->name);
}
@ -3351,10 +3359,50 @@ void BKE_lib_override_library_operations_create(Main *bmain, ID *local, int *r_r
}
if (r_report_flags != nullptr) {
*r_report_flags |= local_report_flags;
*r_report_flags = static_cast<eRNAOverrideMatchResult>(*r_report_flags | local_report_flags);
}
}
}
/* Create library override operations for `local` against its linked reference, immediately
 * restoring forbidden changes as well (RNA_OVERRIDE_COMPARE_RESTORE).
 *
 * NOTE: the public API exposes `r_report_flags` as a plain `int *`, while it actually stores
 * #eRNAOverrideMatchResult flags, hence the `reinterpret_cast` when forwarding to the internal
 * helper. */
void BKE_lib_override_library_operations_create(Main *bmain, ID *local, int *r_report_flags)
{
lib_override_library_operations_create(
bmain,
local,
static_cast<eRNAOverrideMatch>(RNA_OVERRIDE_COMPARE_CREATE | RNA_OVERRIDE_COMPARE_RESTORE),
reinterpret_cast<eRNAOverrideMatchResult *>(r_report_flags));
}
/* Restore forbidden modified override properties of `local` to the values of their matching
 * properties in the linked reference ID, delete the now-obsolete tagged override operations, and
 * clear the 'needs restore' runtime tag.
 *
 * `r_report_flags` (optional) gets RNA_OVERRIDE_MATCH_RESULT_RESTORED added when restoration
 * actually happened. */
void BKE_lib_override_library_operations_restore(Main *bmain, ID *local, int *r_report_flags)
{
/* Early-out if this is not a 'real' liboverride, or if it was not tagged for restoration. */
if (!ID_IS_OVERRIDE_LIBRARY_REAL(local) || (local->override_library->runtime->tag &
IDOVERRIDE_LIBRARY_RUNTIME_TAG_NEEDS_RESTORE) == 0) {
return;
}
PointerRNA rnaptr_src, rnaptr_dst;
RNA_id_pointer_create(local, &rnaptr_dst);
RNA_id_pointer_create(local->override_library->reference, &rnaptr_src);
/* Apply reference values in restore-only mode (see RNA_OVERRIDE_APPLY_FLAG_RESTORE_ONLY),
 * skipping the resync check. */
RNA_struct_override_apply(
bmain,
&rnaptr_dst,
&rnaptr_src,
nullptr,
local->override_library,
static_cast<eRNAOverrideApplyFlag>(RNA_OVERRIDE_APPLY_FLAG_SKIP_RESYNC_CHECK |
RNA_OVERRIDE_APPLY_FLAG_RESTORE_ONLY));
/* Delete the restored override properties, they are forbidden and must not be kept.
 * NOTE(review): the property tag name contains a typo ('RETORE' instead of 'RESTORE'); it
 * matches its DNA declaration though, so a rename would have to update both places at once. */
LISTBASE_FOREACH_MUTABLE (
IDOverrideLibraryProperty *, op, &local->override_library->properties) {
if (op->tag & IDOVERRIDE_LIBRARY_PROPERTY_TAG_NEEDS_RETORE) {
BKE_lib_override_library_property_delete(local->override_library, op);
}
}
/* Restoration is done, clear the runtime tag. */
local->override_library->runtime->tag &= ~IDOVERRIDE_LIBRARY_RUNTIME_TAG_NEEDS_RESTORE;
if (r_report_flags != nullptr) {
*r_report_flags |= RNA_OVERRIDE_MATCH_RESULT_RESTORED;
}
}
struct LibOverrideOpCreateData {
Main *bmain;
@ -3368,8 +3416,12 @@ static void lib_override_library_operations_create_cb(TaskPool *__restrict pool,
ID *id = static_cast<ID *>(taskdata);
eRNAOverrideMatchResult report_flags = RNA_OVERRIDE_MATCH_RESULT_INIT;
BKE_lib_override_library_operations_create(
create_data->bmain, id, reinterpret_cast<int *>(&report_flags));
lib_override_library_operations_create(
create_data->bmain,
id,
static_cast<eRNAOverrideMatch>(RNA_OVERRIDE_COMPARE_CREATE |
RNA_OVERRIDE_COMPARE_TAG_FOR_RESTORE),
&report_flags);
atomic_fetch_and_or_uint32(reinterpret_cast<uint32_t *>(&create_data->report_flags),
report_flags);
}
@ -3443,6 +3495,13 @@ void BKE_lib_override_library_main_operations_create(Main *bmain,
BLI_task_pool_free(task_pool);
if (create_pool_data.report_flags & RNA_OVERRIDE_MATCH_RESULT_RESTORE_TAGGED) {
BKE_lib_override_library_main_operations_restore(
bmain, reinterpret_cast<int *>(&create_pool_data.report_flags));
create_pool_data.report_flags = static_cast<eRNAOverrideMatchResult>(
(create_pool_data.report_flags & ~RNA_OVERRIDE_MATCH_RESULT_RESTORE_TAGGED));
}
if (r_report_flags != nullptr) {
*r_report_flags |= create_pool_data.report_flags;
}
@ -3456,6 +3515,28 @@ void BKE_lib_override_library_main_operations_create(Main *bmain,
#endif
}
/* Run BKE_lib_override_library_operations_restore() over every local 'real' liboverride ID in
 * `bmain` that is tagged as needing restoration.
 *
 * `r_report_flags` (optional) accumulates #eRNAOverrideMatchResult flags over all processed
 * IDs (passed through as the public `int *` form). */
void BKE_lib_override_library_main_operations_restore(Main *bmain, int *r_report_flags)
{
ID *id;
FOREACH_MAIN_ID_BEGIN (bmain, id) {
/* Skip IDs that are not local real liboverrides tagged with 'needs restore'. */
if (!(!ID_IS_LINKED(id) && ID_IS_OVERRIDE_LIBRARY_REAL(id) &&
(id->override_library->runtime->tag & IDOVERRIDE_LIBRARY_RUNTIME_TAG_NEEDS_RESTORE) !=
0)) {
continue;
}
/* Only restore overrides if we do have the real reference data available, and not some empty
 * 'placeholder' for missing data (broken links). */
if (id->override_library->reference->tag & LIB_TAG_MISSING) {
continue;
}
BKE_lib_override_library_operations_restore(bmain, id, r_report_flags);
}
FOREACH_MAIN_ID_END;
}
static bool lib_override_library_id_reset_do(Main *bmain,
ID *id_root,
const bool do_reset_system_override)

View File

@ -116,15 +116,16 @@ set(GLSL_SRC
shaders/compositor_normalize.glsl
shaders/compositor_parallel_reduction.glsl
shaders/compositor_projector_lens_distortion.glsl
shaders/compositor_read_pass.glsl
shaders/compositor_realize_on_domain.glsl
shaders/compositor_screen_lens_distortion.glsl
shaders/compositor_set_alpha.glsl
shaders/compositor_split_viewer.glsl
shaders/compositor_symmetric_blur.glsl
shaders/compositor_symmetric_blur_variable_size.glsl
shaders/compositor_symmetric_separable_blur.glsl
shaders/compositor_tone_map_photoreceptor.glsl
shaders/compositor_tone_map_simple.glsl
shaders/compositor_write_output.glsl
shaders/library/gpu_shader_compositor_alpha_over.glsl
shaders/library/gpu_shader_compositor_blur_common.glsl
@ -204,15 +205,16 @@ set(SRC_SHADER_CREATE_INFOS
shaders/infos/compositor_normalize_info.hh
shaders/infos/compositor_parallel_reduction_info.hh
shaders/infos/compositor_projector_lens_distortion_info.hh
shaders/infos/compositor_read_pass_info.hh
shaders/infos/compositor_realize_on_domain_info.hh
shaders/infos/compositor_screen_lens_distortion_info.hh
shaders/infos/compositor_set_alpha_info.hh
shaders/infos/compositor_split_viewer_info.hh
shaders/infos/compositor_symmetric_blur_info.hh
shaders/infos/compositor_symmetric_blur_variable_size_info.hh
shaders/infos/compositor_symmetric_separable_blur_info.hh
shaders/infos/compositor_tone_map_photoreceptor_info.hh
shaders/infos/compositor_tone_map_simple_info.hh
shaders/infos/compositor_write_output_info.hh
)
set(SHADER_CREATE_INFOS_CONTENT "")

View File

@ -6,6 +6,7 @@
#include "BLI_string_ref.hh"
#include "DNA_scene_types.h"
#include "DNA_vec_types.h"
#include "GPU_texture.h"
@ -41,8 +42,17 @@ class Context {
/* Get the active compositing scene. */
virtual const Scene *get_scene() const = 0;
/* Get the dimensions of the output. */
virtual int2 get_output_size() = 0;
/* Get the width and height of the render passes and of the output texture returned by the
* get_input_texture and get_output_texture methods respectively. */
virtual int2 get_render_size() const = 0;
/* Get the rectangular region representing the area of the input that the compositor will operate
* on. Conversely, the compositor will only update the region of the output that corresponds to
* the compositing region. In the base case, the compositing region covers the entirety of the
* render region with a lower bound of zero and an upper bound of the render size returned by the
* get_render_size method. In other cases, the compositing region might be a subset of the render
* region. */
virtual rcti get_compositing_region() const = 0;
/* Get the texture representing the output where the result of the compositor should be
* written. This should be called by output nodes to get their target texture. */
@ -60,6 +70,9 @@ class Context {
* appropriate place, which can be directly in the UI or just logged to the output stream. */
virtual void set_info_message(StringRef message) const = 0;
/* Get the size of the compositing region. See get_compositing_region(). */
int2 get_compositing_region_size() const;
/* Get the current frame number of the active scene. */
int get_frame_number() const;

View File

@ -1,5 +1,9 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include "BLI_rect.h"
#include "DNA_vec_types.h"
#include "COM_context.hh"
#include "COM_static_cache_manager.hh"
#include "COM_static_shader_manager.hh"
@ -11,6 +15,12 @@ Context::Context(TexturePool &texture_pool) : texture_pool_(texture_pool)
{
}
/* Convenience accessor: width and height of the rectangle returned by get_compositing_region(). */
int2 Context::get_compositing_region_size() const
{
const rcti compositing_region = get_compositing_region();
return int2(BLI_rcti_size_x(&compositing_region), BLI_rcti_size_y(&compositing_region));
}
int Context::get_frame_number() const
{
return get_scene()->r.cfra;

View File

@ -0,0 +1,8 @@
#pragma BLENDER_REQUIRE(gpu_shader_compositor_texture_utilities.glsl)
void main()
{
  /* One invocation per output pixel. */
  ivec2 output_texel = ivec2(gl_GlobalInvocationID.xy);
  /* The pass is sampled with the compositing-region offset applied, while the result is written
   * at the local texel. */
  ivec2 pass_texel = output_texel + compositing_region_lower_bound;
  vec4 pass_color = texture_load(input_tx, pass_texel);
  imageStore(output_img, output_texel, READ_EXPRESSION(pass_color));
}

View File

@ -1,8 +0,0 @@
#pragma BLENDER_REQUIRE(gpu_shader_compositor_texture_utilities.glsl)
void main()
{
ivec2 texel = ivec2(gl_GlobalInvocationID.xy);
vec4 color = vec4(texture_load(image_tx, texel).rgb, texture_load(alpha_tx, texel).x);
imageStore(output_img, texel, color);
}

View File

@ -10,5 +10,5 @@ void main()
#endif
vec4 color = condition ? texture_load(first_image_tx, texel) :
texture_load(second_image_tx, texel);
imageStore(output_img, texel, color);
imageStore(output_img, texel + compositing_region_lower_bound, color);
}

View File

@ -0,0 +1,18 @@
#pragma BLENDER_REQUIRE(gpu_shader_compositor_texture_utilities.glsl)
void main()
{
ivec2 texel = ivec2(gl_GlobalInvocationID.xy);
/* Input is read at the local texel; presumably the input texture covers only the compositing
 * region, so no offset is needed here -- confirm against the dispatching code. */
vec4 input_color = texture_load(input_tx, texel);
#if defined(DIRECT_OUTPUT)
/* Write the input color unchanged. */
vec4 output_color = input_color;
#elif defined(OPAQUE_OUTPUT)
/* Force a fully opaque alpha of 1. */
vec4 output_color = vec4(input_color.rgb, 1.0);
#elif defined(ALPHA_OUTPUT)
/* Take alpha from the x channel of the auxiliary alpha texture. */
float alpha = texture_load(alpha_tx, texel).x;
vec4 output_color = vec4(input_color.rgb, alpha);
#endif
/* The output image is larger than the compositing region, so writes are offset by the region's
 * lower bound to only update the compositing region of the output. */
imageStore(output_img, texel + compositing_region_lower_bound, output_color);
}

View File

@ -61,9 +61,3 @@ GPU_SHADER_CREATE_INFO(compositor_convert_float_to_half_float)
.image(0, GPU_R16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img")
.define("CONVERT_EXPRESSION(value)", "vec4(value.r, vec3(0.0))")
.do_static_compilation(true);
GPU_SHADER_CREATE_INFO(compositor_convert_color_to_opaque)
.additional_info("compositor_convert_shared")
.image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img")
.define("CONVERT_EXPRESSION(value)", "vec4(value.rgb, 1.0)")
.do_static_compilation(true);

View File

@ -0,0 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include "gpu_shader_create_info.hh"
/* Shared base for pass-reading shaders: 16x16 work-group, with a push constant giving the lower
 * bound of the compositing region used to offset reads from the pass sampler (see
 * compositor_read_pass.glsl). */
GPU_SHADER_CREATE_INFO(compositor_read_pass_shared)
.local_group_size(16, 16)
.push_constant(Type::IVEC2, "compositing_region_lower_bound")
.sampler(0, ImageType::FLOAT_2D, "input_tx")
.compute_source("compositor_read_pass.glsl");
/* Read a pass as-is into an RGBA half-float target. */
GPU_SHADER_CREATE_INFO(compositor_read_pass)
.additional_info("compositor_read_pass_shared")
.image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img")
.define("READ_EXPRESSION(pass_color)", "pass_color")
.do_static_compilation(true);
/* Read only the alpha channel of a pass into a single-channel half-float target. */
GPU_SHADER_CREATE_INFO(compositor_read_pass_alpha)
.additional_info("compositor_read_pass_shared")
.image(0, GPU_R16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img")
.define("READ_EXPRESSION(pass_color)", "vec4(pass_color.a, vec3(0.0))")
.do_static_compilation(true);

View File

@ -1,11 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include "gpu_shader_create_info.hh"
GPU_SHADER_CREATE_INFO(compositor_set_alpha)
.local_group_size(16, 16)
.sampler(0, ImageType::FLOAT_2D, "image_tx")
.sampler(1, ImageType::FLOAT_2D, "alpha_tx")
.image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img")
.compute_source("compositor_set_alpha.glsl")
.do_static_compilation(true);

View File

@ -6,6 +6,7 @@ GPU_SHADER_CREATE_INFO(compositor_split_viewer_shared)
.local_group_size(16, 16)
.push_constant(Type::FLOAT, "split_ratio")
.push_constant(Type::IVEC2, "view_size")
.push_constant(Type::IVEC2, "compositing_region_lower_bound")
.sampler(0, ImageType::FLOAT_2D, "first_image_tx")
.sampler(1, ImageType::FLOAT_2D, "second_image_tx")
.image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img")

View File

@ -0,0 +1,26 @@
/* SPDX-License-Identifier: GPL-2.0-or-later */
#include "gpu_shader_create_info.hh"
/* Shared base for output-writing shaders: 16x16 work-group, with a push constant giving the lower
 * bound of the compositing region used to offset writes so that only the compositing region of
 * the output image is updated (see compositor_write_output.glsl). */
GPU_SHADER_CREATE_INFO(compositor_write_output_shared)
.local_group_size(16, 16)
.push_constant(Type::IVEC2, "compositing_region_lower_bound")
.sampler(0, ImageType::FLOAT_2D, "input_tx")
.image(0, GPU_RGBA16F, Qualifier::WRITE, ImageType::FLOAT_2D, "output_img")
.compute_source("compositor_write_output.glsl");
/* Write the input color directly. */
GPU_SHADER_CREATE_INFO(compositor_write_output)
.additional_info("compositor_write_output_shared")
.define("DIRECT_OUTPUT")
.do_static_compilation(true);
/* Write the input color with alpha forced to 1. */
GPU_SHADER_CREATE_INFO(compositor_write_output_opaque)
.additional_info("compositor_write_output_shared")
.define("OPAQUE_OUTPUT")
.do_static_compilation(true);
/* Write the input color taking alpha from a separate alpha texture. */
GPU_SHADER_CREATE_INFO(compositor_write_output_alpha)
.additional_info("compositor_write_output_shared")
.sampler(1, ImageType::FLOAT_2D, "alpha_tx")
.define("ALPHA_OUTPUT")
.do_static_compilation(true);

View File

@ -2,16 +2,23 @@
#include "BLI_listbase.h"
#include "BLI_math_vec_types.hh"
#include "BLI_rect.h"
#include "BLI_string_ref.hh"
#include "BLI_utildefines.h"
#include "BLT_translation.h"
#include "DNA_ID_enums.h"
#include "DNA_camera_types.h"
#include "DNA_object_types.h"
#include "DNA_scene_types.h"
#include "DNA_vec_types.h"
#include "DNA_view3d_types.h"
#include "DEG_depsgraph_query.h"
#include "ED_view3d.h"
#include "DRW_render.h"
#include "IMB_colormanagement.h"
@ -50,11 +57,68 @@ class Context : public realtime_compositor::Context {
return DRW_context_state_get()->scene;
}
int2 get_output_size() override
int2 get_render_size() const override
{
return int2(float2(DRW_viewport_size_get()));
}
/* Returns true if the viewport is in camera view and has an opaque passepartout, that is, the
 * area outside of the camera border is not visible. */
bool is_opaque_camera_view() const
{
/* Check if the viewport is in camera view. */
if (DRW_context_state_get()->rv3d->persp != RV3D_CAMOB) {
return false;
}
/* Check if the camera object that is currently in view is an actual camera. It is possible for
 * a non camera object to be used as a camera, in which case, there will be no passepartout or
 * any other camera setting, so those pseudo cameras can be ignored.
 * NOTE(review): `v3d->camera` is assumed non-null once we know we are in camera view --
 * confirm this invariant holds for all callers. */
Object *camera_object = DRW_context_state_get()->v3d->camera;
if (camera_object->type != OB_CAMERA) {
return false;
}
/* Check if the camera has passepartout active and is totally opaque. */
Camera *cam = static_cast<Camera *>(camera_object->data);
if (!(cam->flag & CAM_SHOWPASSEPARTOUT) || cam->passepartalpha != 1.0f) {
return false;
}
return true;
}
/* Compute the rectangle of the viewport that the compositor should operate on: the whole
 * viewport normally, or only the visible camera border when the camera view is fully opaque. */
rcti get_compositing_region() const override
{
/* Region covering the entire viewport. */
const int2 viewport_size = int2(float2(DRW_viewport_size_get()));
const rcti render_region = rcti{0, viewport_size.x, 0, viewport_size.y};
/* If the camera view is not opaque, that means the content outside of the camera region is
 * visible to some extent, so it would make sense to include them in the compositing region.
 * Otherwise, we limit the compositing region to the visible camera region because anything
 * outside of the camera region will not be visible anyways. */
if (!is_opaque_camera_view()) {
return render_region;
}
/* Compute the camera border and floor it into an integer pixel region. */
rctf camera_border;
ED_view3d_calc_camera_border(DRW_context_state_get()->scene,
DRW_context_state_get()->depsgraph,
DRW_context_state_get()->region,
DRW_context_state_get()->v3d,
DRW_context_state_get()->rv3d,
&camera_border,
false);
rcti camera_region;
BLI_rcti_rctf_copy_floor(&camera_region, &camera_border);
/* Clip the camera region to the part of it actually visible in the viewport. */
rcti visible_camera_region;
BLI_rcti_isect(&render_region, &camera_region, &visible_camera_region);
return visible_camera_region;
}
GPUTexture *get_output_texture() override
{
return DRW_viewport_texture_list_get()->color;
@ -83,36 +147,36 @@ class Engine {
TexturePool texture_pool_;
Context context_;
realtime_compositor::Evaluator evaluator_;
/* Stores the viewport size at the time the last compositor evaluation happened. See the
* update_viewport_size method for more information. */
int2 last_viewport_size_;
/* Stores the compositing region size at the time the last compositor evaluation happened. See
* the update_compositing_region_size method for more information. */
int2 last_compositing_region_size_;
public:
Engine(char *info_message)
: context_(texture_pool_, info_message),
evaluator_(context_),
last_viewport_size_(context_.get_output_size())
last_compositing_region_size_(context_.get_compositing_region_size())
{
}
/* Update the viewport size and evaluate the compositor. */
/* Update the compositing region size and evaluate the compositor. */
void draw()
{
update_viewport_size();
update_compositing_region_size();
evaluator_.evaluate();
}
/* If the size of the viewport changed from the last time the compositor was evaluated, update
* the viewport size and reset the evaluator. That's because the evaluator compiles the node tree
* in a manner that is specifically optimized for the size of the viewport. This should be called
* before evaluating the compositor. */
void update_viewport_size()
/* If the size of the compositing region changed from the last time the compositor was evaluated,
* update the last compositor region size and reset the evaluator. That's because the evaluator
* compiles the node tree in a manner that is specifically optimized for the size of the
* compositing region. This should be called before evaluating the compositor. */
void update_compositing_region_size()
{
if (last_viewport_size_ == context_.get_output_size()) {
if (last_compositing_region_size_ == context_.get_compositing_region_size()) {
return;
}
last_viewport_size_ = context_.get_output_size();
last_compositing_region_size_ = context_.get_compositing_region_size();
evaluator_.reset();
}

View File

@ -1244,10 +1244,6 @@ static void drw_engines_enable_editors(void)
static bool is_compositor_enabled(void)
{
if (!U.experimental.use_realtime_compositor) {
return false;
}
if (DST.draw_ctx.v3d->shading.use_compositor == V3D_SHADING_USE_COMPOSITOR_DISABLED) {
return false;
}

View File

@ -1753,10 +1753,8 @@ void POSE_OT_blend_to_neighbors(wmOperatorType *ot)
/* "termination conditions" - i.e. when we stop */
typedef enum ePosePropagate_Termination {
/** Stop after the current hold ends. */
POSE_PROPAGATE_SMART_HOLDS = 0,
/** Only do on the last keyframe. */
POSE_PROPAGATE_LAST_KEY,
POSE_PROPAGATE_LAST_KEY = 0,
/** Stop after the next keyframe. */
POSE_PROPAGATE_NEXT_KEY,
/** Stop after the specified frame. */
@ -1772,84 +1770,25 @@ typedef enum ePosePropagate_Termination {
/* --------------------------------- */
/**
* Get frame on which the "hold" for the bone ends.
* XXX: this may not really work that well if a bone moves on some channels and not others
* if this happens to be a major issue, scrap this, and just make this happen
* independently per F-Curve
*/
static float pose_propagate_get_boneHoldEndFrame(tPChanFCurveLink *pfl, float startFrame)
typedef struct FrameLink {
struct FrameLink *next, *prev;
float frame;
} FrameLink;
static void propagate_curve_values(ListBase /*tPChanFCurveLink*/ *pflinks,
const float source_frame,
ListBase /*FrameLink*/ *target_frames)
{
struct AnimKeylist *keylist = ED_keylist_create();
Object *ob = pfl->ob;
AnimData *adt = ob->adt;
LinkData *ld;
float endFrame = startFrame;
for (ld = pfl->fcurves.first; ld; ld = ld->next) {
FCurve *fcu = (FCurve *)ld->data;
fcurve_to_keylist(adt, fcu, keylist, 0);
}
ED_keylist_prepare_for_direct_access(keylist);
/* Find the long keyframe (i.e. hold), and hence obtain the endFrame value
* - the best case would be one that starts on the frame itself
*/
const ActKeyColumn *ab = ED_keylist_find_exact(keylist, startFrame);
/* There are only two cases for no-exact match:
* 1) the current frame is just before another key but not on a key itself
* 2) the current frame is on a key, but that key doesn't link to the next
*
* If we've got the first case, then we can search for another block,
* otherwise forget it, as we'd be overwriting some valid data.
*/
if (ab == NULL) {
/* We've got case 1, so try the one after. */
ab = ED_keylist_find_next(keylist, startFrame);
if ((actkeyblock_get_valid_hold(ab) & ACTKEYBLOCK_FLAG_STATIC_HOLD) == 0) {
/* Try the block before this frame then as last resort. */
ab = ED_keylist_find_prev(keylist, startFrame);
LISTBASE_FOREACH (tPChanFCurveLink *, pfl, pflinks) {
LISTBASE_FOREACH (LinkData *, ld, &pfl->fcurves) {
FCurve *fcu = (FCurve *)ld->data;
const float current_fcu_value = evaluate_fcurve(fcu, source_frame);
LISTBASE_FOREACH (FrameLink *, target_frame, target_frames) {
insert_vert_fcurve(
fcu, target_frame->frame, current_fcu_value, BEZT_KEYTYPE_KEYFRAME, INSERTKEY_NEEDED);
}
}
}
/* Whatever happens, stop searching now.... */
if ((actkeyblock_get_valid_hold(ab) & ACTKEYBLOCK_FLAG_STATIC_HOLD) == 0) {
/* Restrict range to just the frame itself
* i.e. everything is in motion, so no holds to safely overwrite. */
ab = NULL;
}
/* Check if we can go any further than we've already gone. */
if (ab) {
/* Go to next if it is also valid and meets "extension" criteria. */
while (ab->next) {
const ActKeyColumn *abn = ab->next;
/* Must be valid. */
if ((actkeyblock_get_valid_hold(abn) & ACTKEYBLOCK_FLAG_STATIC_HOLD) == 0) {
break;
}
/* Should have the same number of curves. */
if (ab->totblock != abn->totblock) {
break;
}
/* We can extend the bounds to the end of this "next" block now. */
ab = abn;
}
/* End frame can now take the value of the end of the block. */
endFrame = ab->next->cfra;
}
/* Free temp memory. */
ED_keylist_free(keylist);
/* Return the end frame we've found. */
return endFrame;
}
/**
@ -1902,27 +1841,6 @@ static void pose_propagate_fcurve(FCurve *fcu, float start_frame, const float en
}
}
typedef struct FrameLink {
struct FrameLink *next, *prev;
float frame;
} FrameLink;
static void propagate_curve_values(ListBase /*tPChanFCurveLink*/ *pflinks,
const float source_frame,
ListBase /*FrameLink*/ *target_frames)
{
LISTBASE_FOREACH (tPChanFCurveLink *, pfl, pflinks) {
LISTBASE_FOREACH (LinkData *, ld, &pfl->fcurves) {
FCurve *fcu = (FCurve *)ld->data;
const float current_fcu_value = evaluate_fcurve(fcu, source_frame);
LISTBASE_FOREACH (FrameLink *, target_frame, target_frames) {
insert_vert_fcurve(
fcu, target_frame->frame, current_fcu_value, BEZT_KEYTYPE_KEYFRAME, INSERTKEY_NEEDED);
}
}
}
}
static float find_next_key(ListBase *pflinks, const float start_frame)
{
float target_frame = FLT_MAX;
@ -2084,21 +2002,6 @@ static int pose_propagate_exec(bContext *C, wmOperator *op)
propagate_curve_values(&pflinks, current_frame, &target_frames);
break;
}
case POSE_PROPAGATE_SMART_HOLDS: {
/* For each bone, perform the copying required. */
LISTBASE_FOREACH (tPChanFCurveLink *, pfl, &pflinks) {
/* Mode-specific data preprocessing (requiring access to all curves). */
/* We store in endFrame the end frame of the "long keyframe" (i.e. a held value)
* starting from the keyframe that occurs after the current frame. */
const int smart_end_frame = pose_propagate_get_boneHoldEndFrame(pfl, current_frame);
/* Go through propagating pose to keyframes, curve by curve. */
LISTBASE_FOREACH (LinkData *, ld, &pfl->fcurves) {
pose_propagate_fcurve((FCurve *)ld->data, current_frame, smart_end_frame);
}
}
break;
}
}
BLI_freelistN(&target_frames);
@ -2120,11 +2023,6 @@ static int pose_propagate_exec(bContext *C, wmOperator *op)
void POSE_OT_propagate(wmOperatorType *ot)
{
static const EnumPropertyItem terminate_items[] = {
{POSE_PROPAGATE_SMART_HOLDS,
"WHILE_HELD",
0,
"While Held",
"Propagate pose to all keyframes after current frame that don't change (Default behavior)"},
{POSE_PROPAGATE_NEXT_KEY,
"NEXT_KEY",
0,
@ -2177,7 +2075,7 @@ void POSE_OT_propagate(wmOperatorType *ot)
ot->prop = RNA_def_enum(ot->srna,
"mode",
terminate_items,
POSE_PROPAGATE_SMART_HOLDS,
POSE_PROPAGATE_NEXT_KEY,
"Terminate Mode",
"Method used to determine when to stop propagating pose to keyframes");
RNA_def_float(ot->srna,

View File

@ -2951,6 +2951,11 @@ static int frame_offset_exec(bContext *C, wmOperator *op)
int delta = RNA_int_get(op->ptr, "delta");
/* In order to jump from e.g. 1.5 to 1 the delta needs to be incremented by 1 since the subframe
* is always zeroed. Otherwise it would jump to 0. */
if (delta < 0 && scene->r.subframe > 0) {
delta += 1;
}
scene->r.cfra += delta;
FRAMENUMBER_MIN_CLAMP(scene->r.cfra);
scene->r.subframe = 0.0f;
@ -3062,7 +3067,7 @@ static int keyframe_jump_exec(bContext *C, wmOperator *op)
return OPERATOR_CANCELLED;
}
float cfra = (float)(scene->r.cfra);
const float cfra = BKE_scene_frame_get(scene);
/* Initialize binary-tree-list for getting keyframes. */
struct AnimKeylist *keylist = ED_keylist_create();
@ -3096,25 +3101,26 @@ static int keyframe_jump_exec(bContext *C, wmOperator *op)
/* find matching keyframe in the right direction */
const ActKeyColumn *ak;
if (next) {
ak = ED_keylist_find_next(keylist, cfra);
}
else {
ak = ED_keylist_find_prev(keylist, cfra);
while ((ak != NULL) && (done == false)) {
if (cfra < ak->cfra) {
BKE_scene_frame_set(scene, ak->cfra);
done = true;
}
else {
ak = ak->next;
}
}
}
while ((ak != NULL) && (done == false)) {
if (scene->r.cfra != (int)ak->cfra) {
/* this changes the frame, so set the frame and we're done */
const int whole_frame = (int)ak->cfra;
scene->r.cfra = whole_frame;
scene->r.subframe = ak->cfra - whole_frame;
done = true;
}
else {
/* take another step... */
if (next) {
ak = ak->next;
else {
ak = ED_keylist_find_prev(keylist, cfra);
while ((ak != NULL) && (done == false)) {
if (cfra > ak->cfra) {
BKE_scene_frame_set(scene, ak->cfra);
done = true;
}
else {
ak = ak->prev;

View File

@ -154,18 +154,23 @@ static void buttons_texture_users_find_nodetree(ListBase *users,
}
}
static void buttons_texture_modifier_geonodes_users_add(Object *ob,
NodesModifierData *nmd,
bNodeTree *node_tree,
ListBase *users)
static void buttons_texture_modifier_geonodes_users_add(
Object *ob,
NodesModifierData *nmd,
bNodeTree *node_tree,
ListBase *users,
blender::Set<const bNodeTree *> &handled_groups)
{
PointerRNA ptr;
PropertyRNA *prop;
for (bNode *node : node_tree->all_nodes()) {
if (node->type == NODE_GROUP && node->id) {
/* Recurse into the node group */
buttons_texture_modifier_geonodes_users_add(ob, nmd, (bNodeTree *)node->id, users);
if (handled_groups.add(reinterpret_cast<bNodeTree *>(node->id))) {
/* Recurse into the node group */
buttons_texture_modifier_geonodes_users_add(
ob, nmd, (bNodeTree *)node->id, users, handled_groups);
}
}
LISTBASE_FOREACH (bNodeSocket *, socket, &node->inputs) {
if (socket->flag & SOCK_UNAVAIL) {
@ -205,7 +210,8 @@ static void buttons_texture_modifier_foreach(void *userData,
if (md->type == eModifierType_Nodes) {
NodesModifierData *nmd = (NodesModifierData *)md;
if (nmd->node_group != nullptr) {
buttons_texture_modifier_geonodes_users_add(ob, nmd, nmd->node_group, users);
blender::Set<const bNodeTree *> handled_groups;
buttons_texture_modifier_geonodes_users_add(ob, nmd, nmd->node_group, users, handled_groups);
}
}
else {

View File

@ -3086,10 +3086,6 @@ static void snode_setup_v2d(SpaceNode &snode, ARegion &region, const float2 &cen
/* Similar to is_compositor_enabled() in draw_manager.c but checks all 3D views. */
static bool realtime_compositor_is_in_use(const bContext &context)
{
if (!U.experimental.use_realtime_compositor) {
return false;
}
const Scene *scene = CTX_data_scene(&context);
if (!scene->use_nodes) {
return false;

View File

@ -274,6 +274,9 @@ typedef struct IDOverrideLibraryProperty {
enum {
/** This override property (operation) is unused and should be removed by cleanup process. */
IDOVERRIDE_LIBRARY_TAG_UNUSED = 1 << 0,
/** This override property is forbidden and should be restored to its linked reference value. */
IDOVERRIDE_LIBRARY_PROPERTY_TAG_NEEDS_RETORE = 1 << 1,
};
#
@ -287,6 +290,12 @@ typedef struct IDOverrideLibraryRuntime {
enum {
/** This override needs to be reloaded. */
IDOVERRIDE_LIBRARY_RUNTIME_TAG_NEEDS_RELOAD = 1 << 0,
/**
* This override contains properties with forbidden changes, which should be restored to their
* linked reference value.
*/
IDOVERRIDE_LIBRARY_RUNTIME_TAG_NEEDS_RESTORE = 1 << 1,
};
/* Main container for all overriding data info of a data-block. */

View File

@ -652,8 +652,6 @@ typedef struct UserDef_Experimental {
char use_override_templates;
char enable_eevee_next;
char use_sculpt_texture_paint;
char use_realtime_compositor;
char _pad0[7];
/** `makesdna` does not allow empty structs. */
} UserDef_Experimental;

View File

@ -798,8 +798,10 @@ typedef enum eRNAOverrideMatch {
/** Create new property override if needed and possible. */
RNA_OVERRIDE_COMPARE_CREATE = 1 << 16,
/** Restore property's value(s) to reference ones if needed and possible. */
/** Restore property's value(s) to reference ones, if needed and possible. */
RNA_OVERRIDE_COMPARE_RESTORE = 1 << 17,
/** Tag for restoration of property's value(s) to reference ones, if needed and possible. */
RNA_OVERRIDE_COMPARE_TAG_FOR_RESTORE = 1 << 18,
} eRNAOverrideMatch;
typedef enum eRNAOverrideMatchResult {
@ -810,8 +812,13 @@ typedef enum eRNAOverrideMatchResult {
* differences between local and reference.
*/
RNA_OVERRIDE_MATCH_RESULT_CREATED = 1 << 0,
/**
* Some properties are illegally different from their reference values and have been tagged for
* restoration.
*/
RNA_OVERRIDE_MATCH_RESULT_RESTORE_TAGGED = 1 << 1,
/** Some properties were reset to reference values. */
RNA_OVERRIDE_MATCH_RESULT_RESTORED = 1 << 1,
RNA_OVERRIDE_MATCH_RESULT_RESTORED = 1 << 2,
} eRNAOverrideMatchResult;
typedef enum eRNAOverrideStatus {
@ -861,6 +868,12 @@ typedef enum eRNAOverrideApplyFlag {
* pointers properties, unless the destination original value (the one being overridden) is NULL.
*/
RNA_OVERRIDE_APPLY_FLAG_IGNORE_ID_POINTERS = 1 << 0,
/** Do not check for liboverrides needing resync with their linked reference data. */
RNA_OVERRIDE_APPLY_FLAG_SKIP_RESYNC_CHECK = 1 << 1,
/** Only perform restore operations. */
RNA_OVERRIDE_APPLY_FLAG_RESTORE_ONLY = 1 << 2,
} eRNAOverrideApplyFlag;
/**

View File

@ -633,6 +633,7 @@ bool RNA_struct_override_matches(Main *bmain,
const bool ignore_overridden = (flags & RNA_OVERRIDE_COMPARE_IGNORE_OVERRIDDEN) != 0;
const bool do_create = (flags & RNA_OVERRIDE_COMPARE_CREATE) != 0;
const bool do_restore = (flags & RNA_OVERRIDE_COMPARE_RESTORE) != 0;
const bool do_tag_for_restore = (flags & RNA_OVERRIDE_COMPARE_TAG_FOR_RESTORE) != 0;
#ifdef DEBUG_OVERRIDE_TIMEIT
static float _sum_time_global = 0.0f;
@ -779,7 +780,7 @@ bool RNA_struct_override_matches(Main *bmain,
}
#endif
eRNAOverrideMatchResult report_flags = 0;
eRNAOverrideMatchResult report_flags = RNA_OVERRIDE_MATCH_RESULT_INIT;
const int diff = rna_property_override_diff(bmain,
&prop_local,
&prop_reference,
@ -800,7 +801,7 @@ bool RNA_struct_override_matches(Main *bmain,
matching = matching && diff == 0;
if (r_report_flags) {
*r_report_flags |= report_flags;
*r_report_flags = (*r_report_flags | report_flags);
}
if (diff != 0) {
@ -812,29 +813,52 @@ bool RNA_struct_override_matches(Main *bmain,
BKE_lib_override_library_operations_tag(op, IDOVERRIDE_LIBRARY_TAG_UNUSED, false);
}
if (do_restore && (report_flags & RNA_OVERRIDE_MATCH_RESULT_CREATED) == 0) {
if ((do_restore || do_tag_for_restore) &&
(report_flags & RNA_OVERRIDE_MATCH_RESULT_CREATED) == 0) {
/* We are allowed to restore to reference's values. */
if (ELEM(NULL, op, opop) || opop->operation == IDOVERRIDE_LIBRARY_OP_NOOP) {
/* We should restore that property to its reference value */
if (RNA_property_editable(ptr_local, rawprop)) {
IDOverrideLibraryPropertyOperation opop_tmp = {
.operation = IDOVERRIDE_LIBRARY_OP_REPLACE,
.subitem_reference_index = -1,
.subitem_local_index = -1,
};
rna_property_override_operation_apply(bmain,
ptr_local,
ptr_reference,
NULL,
rawprop,
rawprop,
NULL,
NULL,
NULL,
NULL,
&opop_tmp);
if (r_report_flags) {
*r_report_flags |= RNA_OVERRIDE_MATCH_RESULT_RESTORED;
/* This property should be restored to its reference value. This should not be done
* here, since this code may be called from a non-main thread (modifying data through
* RNA is not thread-safe). */
BLI_assert(op == NULL); /* Forbidden override prop should not exist currently. */
if (do_restore) {
IDOverrideLibraryPropertyOperation opop_tmp = {
.operation = IDOVERRIDE_LIBRARY_OP_REPLACE,
.subitem_reference_index = -1,
.subitem_local_index = -1,
};
rna_property_override_operation_apply(bmain,
ptr_local,
ptr_reference,
NULL,
rawprop,
rawprop,
NULL,
NULL,
NULL,
NULL,
&opop_tmp);
if (r_report_flags) {
*r_report_flags |= RNA_OVERRIDE_MATCH_RESULT_RESTORED;
}
}
else {
if (op == NULL) {
/* An override property is needed, create a temp one if necessary. */
op = BKE_lib_override_library_property_get(override, rna_path, NULL);
BKE_lib_override_library_operations_tag(op, IDOVERRIDE_LIBRARY_TAG_UNUSED, true);
}
BKE_lib_override_library_property_operation_get(
op, IDOVERRIDE_LIBRARY_OP_REPLACE, NULL, NULL, -1, -1, false, NULL, NULL);
BKE_lib_override_library_operations_tag(
op, IDOVERRIDE_LIBRARY_PROPERTY_TAG_NEEDS_RETORE, true);
override->runtime->tag |= IDOVERRIDE_LIBRARY_RUNTIME_TAG_NEEDS_RESTORE;
if (r_report_flags) {
*r_report_flags |= RNA_OVERRIDE_MATCH_RESULT_RESTORE_TAGGED;
}
}
}
else {
@ -850,7 +874,7 @@ bool RNA_struct_override_matches(Main *bmain,
else if ((report_flags & RNA_OVERRIDE_MATCH_RESULT_CREATED) == 0 && ELEM(NULL, op, opop)) {
/* This property is not overridden, and differs from reference, so we have no match. */
matching = false;
if (!(do_create || do_restore)) {
if (!(do_create || do_restore || do_tag_for_restore)) {
/* Since we have no 'changing' action allowed, we can break here. */
if (rna_path != rna_path_buffer) {
MEM_freeN(rna_path);
@ -1188,132 +1212,30 @@ void RNA_struct_override_apply(Main *bmain,
#ifdef DEBUG_OVERRIDE_TIMEIT
TIMEIT_START_AVERAGED(RNA_struct_override_apply);
#endif
const bool do_restore_only = (flag & RNA_OVERRIDE_APPLY_FLAG_RESTORE_ONLY) != 0;
/* NOTE: Applying insert operations in a separate pass is mandatory.
* We could optimize this later, but for now, as inefficient as it is,
* don't think this is a critical point.
*/
bool do_insert = false;
for (int i = 0; i < 2; i++, do_insert = true) {
for (int i = 0; i < (do_restore_only ? 1 : 2); i++, do_insert = true) {
LISTBASE_FOREACH (IDOverrideLibraryProperty *, op, &override->properties) {
if (do_restore_only && (op->tag % IDOVERRIDE_LIBRARY_PROPERTY_TAG_NEEDS_RETORE) == 0) {
continue;
}
/* That tag should only exist for a short lifespan when restoring values from the reference
* linked data. */
BLI_assert((op->tag & IDOVERRIDE_LIBRARY_PROPERTY_TAG_NEEDS_RETORE) == 0 || do_restore_only);
/* Simplified for now! */
PointerRNA data_src, data_dst;
PointerRNA data_item_src, data_item_dst;
PropertyRNA *prop_src, *prop_dst;
if (RNA_path_resolve_property_and_item_pointer(
ptr_dst, op->rna_path, &data_dst, &prop_dst, &data_item_dst) &&
RNA_path_resolve_property_and_item_pointer(
ptr_src, op->rna_path, &data_src, &prop_src, &data_item_src)) {
PointerRNA data_storage, data_item_storage;
PropertyRNA *prop_storage = NULL;
/* It is totally OK if this does not succeed,
* only a subset of override operations actually need storage. */
if (ptr_storage && (ptr_storage->owner_id != NULL)) {
RNA_path_resolve_property_and_item_pointer(
ptr_storage, op->rna_path, &data_storage, &prop_storage, &data_item_storage);
}
/* Check if an overridden ID pointer supposed to be in sync with linked data gets out of
* sync. */
if ((ptr_dst->owner_id->tag & LIB_TAG_LIB_OVERRIDE_NEED_RESYNC) == 0) {
if (op->rna_prop_type == PROP_POINTER &&
(((IDOverrideLibraryPropertyOperation *)op->operations.first)->flag &
IDOVERRIDE_LIBRARY_FLAG_IDPOINTER_MATCH_REFERENCE) != 0) {
BLI_assert(RNA_struct_is_ID(RNA_property_pointer_type(&data_src, prop_src)));
BLI_assert(ptr_src->owner_id ==
rna_property_override_property_real_id_owner(bmain, &data_src, NULL, NULL));
BLI_assert(ptr_dst->owner_id ==
rna_property_override_property_real_id_owner(bmain, &data_dst, NULL, NULL));
PointerRNA prop_ptr_src = RNA_property_pointer_get(&data_src, prop_src);
PointerRNA prop_ptr_dst = RNA_property_pointer_get(&data_dst, prop_dst);
rna_property_override_check_resync(
bmain, ptr_dst, ptr_src, &prop_ptr_dst, &prop_ptr_src);
}
else if (op->rna_prop_type == PROP_COLLECTION) {
if (RNA_struct_is_ID(RNA_property_pointer_type(&data_src, prop_src))) {
BLI_assert(ptr_src->owner_id == rna_property_override_property_real_id_owner(
bmain, &data_src, NULL, NULL));
BLI_assert(ptr_dst->owner_id == rna_property_override_property_real_id_owner(
bmain, &data_dst, NULL, NULL));
LISTBASE_FOREACH (IDOverrideLibraryPropertyOperation *, opop, &op->operations) {
if ((opop->flag & IDOVERRIDE_LIBRARY_FLAG_IDPOINTER_MATCH_REFERENCE) == 0) {
continue;
}
PointerRNA *ptr_item_dst, *ptr_item_src;
PointerRNA private_ptr_item_dst, private_ptr_item_src;
rna_porperty_override_collection_subitem_lookup(ptr_dst,
ptr_src,
NULL,
prop_dst,
prop_src,
NULL,
&ptr_item_dst,
&ptr_item_src,
NULL,
&private_ptr_item_dst,
&private_ptr_item_src,
NULL,
op,
opop);
rna_property_override_check_resync(
bmain, ptr_dst, ptr_src, ptr_item_dst, ptr_item_src);
}
}
}
}
/* Workaround for older broken overrides, we then assume that non-matching ID pointers
* override operations that replace a non-NULL value are 'mistakes', and ignore (do not
* apply) them. */
if ((flag & RNA_OVERRIDE_APPLY_FLAG_IGNORE_ID_POINTERS) != 0 &&
op->rna_prop_type == PROP_POINTER &&
(((IDOverrideLibraryPropertyOperation *)op->operations.first)->flag &
IDOVERRIDE_LIBRARY_FLAG_IDPOINTER_MATCH_REFERENCE) == 0) {
BLI_assert(ptr_src->owner_id ==
rna_property_override_property_real_id_owner(bmain, &data_src, NULL, NULL));
BLI_assert(ptr_dst->owner_id ==
rna_property_override_property_real_id_owner(bmain, &data_dst, NULL, NULL));
PointerRNA prop_ptr_dst = RNA_property_pointer_get(&data_dst, prop_dst);
if (prop_ptr_dst.type != NULL && RNA_struct_is_ID(prop_ptr_dst.type)) {
#ifndef NDEBUG
PointerRNA prop_ptr_src = RNA_property_pointer_get(&data_src, prop_src);
BLI_assert(prop_ptr_src.type == NULL || RNA_struct_is_ID(prop_ptr_src.type));
#endif
ID *id_dst = rna_property_override_property_real_id_owner(
bmain, &prop_ptr_dst, NULL, NULL);
if (id_dst != NULL) {
CLOG_INFO(&LOG,
4,
"%s: Ignoring local override on ID pointer property '%s', as requested by "
"RNA_OVERRIDE_APPLY_FLAG_IGNORE_ID_POINTERS flag",
ptr_dst->owner_id->name,
op->rna_path);
continue;
}
}
}
rna_property_override_apply_ex(bmain,
&data_dst,
&data_src,
prop_storage ? &data_storage : NULL,
prop_dst,
prop_src,
prop_storage,
&data_item_dst,
&data_item_src,
prop_storage ? &data_item_storage : NULL,
op,
do_insert);
}
else {
if (!(RNA_path_resolve_property_and_item_pointer(
ptr_dst, op->rna_path, &data_dst, &prop_dst, &data_item_dst) &&
RNA_path_resolve_property_and_item_pointer(
ptr_src, op->rna_path, &data_src, &prop_src, &data_item_src))) {
CLOG_INFO(&LOG,
4,
"Failed to apply library override operation to '%s.%s' "
@ -1322,7 +1244,117 @@ void RNA_struct_override_apply(Main *bmain,
op->rna_path,
RNA_path_resolve_property(ptr_dst, op->rna_path, &data_dst, &prop_dst),
RNA_path_resolve_property(ptr_src, op->rna_path, &data_src, &prop_src));
continue;
}
PointerRNA data_storage, data_item_storage;
PropertyRNA *prop_storage = NULL;
/* It is totally OK if this does not succeed,
* only a subset of override operations actually need storage. */
if (ptr_storage && (ptr_storage->owner_id != NULL)) {
RNA_path_resolve_property_and_item_pointer(
ptr_storage, op->rna_path, &data_storage, &prop_storage, &data_item_storage);
}
/* Check if an overridden ID pointer supposed to be in sync with linked data gets out of
* sync. */
if ((flag & RNA_OVERRIDE_APPLY_FLAG_SKIP_RESYNC_CHECK) == 0 &&
(ptr_dst->owner_id->tag & LIB_TAG_LIB_OVERRIDE_NEED_RESYNC) == 0) {
if (op->rna_prop_type == PROP_POINTER &&
(((IDOverrideLibraryPropertyOperation *)op->operations.first)->flag &
IDOVERRIDE_LIBRARY_FLAG_IDPOINTER_MATCH_REFERENCE) != 0) {
BLI_assert(RNA_struct_is_ID(RNA_property_pointer_type(&data_src, prop_src)));
BLI_assert(ptr_src->owner_id ==
rna_property_override_property_real_id_owner(bmain, &data_src, NULL, NULL));
BLI_assert(ptr_dst->owner_id ==
rna_property_override_property_real_id_owner(bmain, &data_dst, NULL, NULL));
PointerRNA prop_ptr_src = RNA_property_pointer_get(&data_src, prop_src);
PointerRNA prop_ptr_dst = RNA_property_pointer_get(&data_dst, prop_dst);
rna_property_override_check_resync(
bmain, ptr_dst, ptr_src, &prop_ptr_dst, &prop_ptr_src);
}
else if (op->rna_prop_type == PROP_COLLECTION) {
if (RNA_struct_is_ID(RNA_property_pointer_type(&data_src, prop_src))) {
BLI_assert(ptr_src->owner_id ==
rna_property_override_property_real_id_owner(bmain, &data_src, NULL, NULL));
BLI_assert(ptr_dst->owner_id ==
rna_property_override_property_real_id_owner(bmain, &data_dst, NULL, NULL));
LISTBASE_FOREACH (IDOverrideLibraryPropertyOperation *, opop, &op->operations) {
if ((opop->flag & IDOVERRIDE_LIBRARY_FLAG_IDPOINTER_MATCH_REFERENCE) == 0) {
continue;
}
PointerRNA *ptr_item_dst, *ptr_item_src;
PointerRNA private_ptr_item_dst, private_ptr_item_src;
rna_porperty_override_collection_subitem_lookup(ptr_dst,
ptr_src,
NULL,
prop_dst,
prop_src,
NULL,
&ptr_item_dst,
&ptr_item_src,
NULL,
&private_ptr_item_dst,
&private_ptr_item_src,
NULL,
op,
opop);
rna_property_override_check_resync(
bmain, ptr_dst, ptr_src, ptr_item_dst, ptr_item_src);
}
}
}
}
/* Workaround for older broken overrides, we then assume that non-matching ID pointers
* override operations that replace a non-NULL value are 'mistakes', and ignore (do not
* apply) them. */
if ((flag & RNA_OVERRIDE_APPLY_FLAG_IGNORE_ID_POINTERS) != 0 &&
op->rna_prop_type == PROP_POINTER &&
(((IDOverrideLibraryPropertyOperation *)op->operations.first)->flag &
IDOVERRIDE_LIBRARY_FLAG_IDPOINTER_MATCH_REFERENCE) == 0) {
BLI_assert(ptr_src->owner_id ==
rna_property_override_property_real_id_owner(bmain, &data_src, NULL, NULL));
BLI_assert(ptr_dst->owner_id ==
rna_property_override_property_real_id_owner(bmain, &data_dst, NULL, NULL));
PointerRNA prop_ptr_dst = RNA_property_pointer_get(&data_dst, prop_dst);
if (prop_ptr_dst.type != NULL && RNA_struct_is_ID(prop_ptr_dst.type)) {
#ifndef NDEBUG
PointerRNA prop_ptr_src = RNA_property_pointer_get(&data_src, prop_src);
BLI_assert(prop_ptr_src.type == NULL || RNA_struct_is_ID(prop_ptr_src.type));
#endif
ID *id_dst = rna_property_override_property_real_id_owner(
bmain, &prop_ptr_dst, NULL, NULL);
if (id_dst != NULL) {
CLOG_INFO(&LOG,
4,
"%s: Ignoring local override on ID pointer property '%s', as requested by "
"RNA_OVERRIDE_APPLY_FLAG_IGNORE_ID_POINTERS flag",
ptr_dst->owner_id->name,
op->rna_path);
continue;
}
}
}
rna_property_override_apply_ex(bmain,
&data_dst,
&data_src,
prop_storage ? &data_storage : NULL,
prop_dst,
prop_src,
prop_storage,
&data_item_dst,
&data_item_src,
prop_storage ? &data_item_storage : NULL,
op,
do_insert);
}
}

View File

@ -6392,10 +6392,6 @@ static void rna_def_userdef_experimental(BlenderRNA *brna)
RNA_def_property_ui_text(
prop, "Sculpt Mode Tilt Support", "Support for pen tablet tilt events in Sculpt Mode");
prop = RNA_def_property(srna, "use_realtime_compositor", PROP_BOOLEAN, PROP_NONE);
RNA_def_property_boolean_sdna(prop, NULL, "use_realtime_compositor", 1);
RNA_def_property_ui_text(prop, "Realtime Compositor", "Enable the new realtime compositor");
prop = RNA_def_property(srna, "use_sculpt_texture_paint", PROP_BOOLEAN, PROP_NONE);
RNA_def_property_boolean_sdna(prop, NULL, "use_sculpt_texture_paint", 1);
RNA_def_property_ui_text(prop, "Sculpt Texture Paint", "Use texture painting in Sculpt Mode");

View File

@ -100,7 +100,7 @@ class BoxMaskOperation : public NodeOperation {
/* A single-value mask carries no domain of its own, so fall back to the size of the compositing
 * region; otherwise, operate in the domain of the input mask. */
Domain compute_domain() override
{
  if (get_input("Mask").is_single_value()) {
    /* Use the compositing region size, consistent with the other operations in this commit. */
    return Domain(context().get_compositing_region_size());
  }
  return get_input("Mask").domain();
}

View File

@ -81,9 +81,15 @@ class CompositeOperation : public NodeOperation {
/* Executes when the alpha channel of the image is ignored. */
void execute_ignore_alpha()
{
GPUShader *shader = shader_manager().get("compositor_convert_color_to_opaque");
GPUShader *shader = shader_manager().get("compositor_write_output_opaque");
GPU_shader_bind(shader);
/* The compositing space might be limited to a subset of the output texture, so only write into
* that compositing region. */
const rcti compositing_region = context().get_compositing_region();
const int2 lower_bound = int2(compositing_region.xmin, compositing_region.ymin);
GPU_shader_uniform_2iv(shader, "compositing_region_lower_bound", lower_bound);
const Result &image = get_input("Image");
image.bind_as_texture(shader, "input_tx");
@ -91,7 +97,8 @@ class CompositeOperation : public NodeOperation {
const int image_unit = GPU_shader_get_texture_binding(shader, "output_img");
GPU_texture_image_bind(output_texture, image_unit);
compute_dispatch_threads_at_least(shader, compute_domain().size);
const int2 compositing_region_size = context().get_compositing_region_size();
compute_dispatch_threads_at_least(shader, compositing_region_size);
image.unbind_as_texture();
GPU_texture_image_unbind(output_texture);
@ -102,22 +109,44 @@ class CompositeOperation : public NodeOperation {
* to the output texture. */
void execute_copy()
{
  GPUShader *shader = shader_manager().get("compositor_write_output");
  GPU_shader_bind(shader);

  /* The compositing space might be limited to a subset of the output texture, so only write into
   * that compositing region. */
  const rcti compositing_region = context().get_compositing_region();
  const int2 lower_bound = int2(compositing_region.xmin, compositing_region.ymin);
  GPU_shader_uniform_2iv(shader, "compositing_region_lower_bound", lower_bound);

  const Result &image = get_input("Image");
  image.bind_as_texture(shader, "input_tx");

  GPUTexture *output_texture = context().get_output_texture();
  const int image_unit = GPU_shader_get_texture_binding(shader, "output_img");
  GPU_texture_image_bind(output_texture, image_unit);

  /* Dispatch only over the compositing region; the rest of the output is left untouched. The
   * previous implementation's whole-texture GPU_texture_copy (and its preceding memory barrier)
   * is removed, since the shader now performs the region-limited write. */
  const int2 compositing_region_size = context().get_compositing_region_size();
  compute_dispatch_threads_at_least(shader, compositing_region_size);

  image.unbind_as_texture();
  GPU_texture_image_unbind(output_texture);
  GPU_shader_unbind();
}
/* Executes when the alpha channel of the image is set as the value of the input alpha. */
void execute_set_alpha()
{
GPUShader *shader = shader_manager().get("compositor_set_alpha");
GPUShader *shader = shader_manager().get("compositor_write_output_alpha");
GPU_shader_bind(shader);
/* The compositing space might be limited to a subset of the output texture, so only write into
* that compositing region. */
const rcti compositing_region = context().get_compositing_region();
const int2 lower_bound = int2(compositing_region.xmin, compositing_region.ymin);
GPU_shader_uniform_2iv(shader, "compositing_region_lower_bound", lower_bound);
const Result &image = get_input("Image");
image.bind_as_texture(shader, "image_tx");
image.bind_as_texture(shader, "input_tx");
const Result &alpha = get_input("Alpha");
alpha.bind_as_texture(shader, "alpha_tx");
@ -126,7 +155,8 @@ class CompositeOperation : public NodeOperation {
const int image_unit = GPU_shader_get_texture_binding(shader, "output_img");
GPU_texture_image_bind(output_texture, image_unit);
compute_dispatch_threads_at_least(shader, compute_domain().size);
const int2 compositing_region_size = context().get_compositing_region_size();
compute_dispatch_threads_at_least(shader, compositing_region_size);
image.unbind_as_texture();
alpha.unbind_as_texture();
@ -142,10 +172,11 @@ class CompositeOperation : public NodeOperation {
return bnode().custom2 & CMP_NODE_OUTPUT_IGNORE_ALPHA;
}
/* The operation domain has the same size as the compositing region without any transformations
 * applied. */
Domain compute_domain() override
{
  return Domain(context().get_compositing_region_size());
}
};

View File

@ -98,7 +98,7 @@ class EllipseMaskOperation : public NodeOperation {
/* A single-value mask carries no domain of its own, so fall back to the size of the compositing
 * region; otherwise, operate in the domain of the input mask. */
Domain compute_domain() override
{
  if (get_input("Mask").is_single_value()) {
    /* Use the compositing region size, consistent with the other operations in this commit. */
    return Domain(context().get_compositing_region_size());
  }
  return get_input("Mask").domain();
}

View File

@ -9,6 +9,7 @@
#include "BLI_linklist.h"
#include "BLI_math_vec_types.hh"
#include "BLI_rect.h"
#include "BLI_utildefines.h"
#include "BLT_translation.h"
@ -23,6 +24,7 @@
#include "DEG_depsgraph_query.h"
#include "DNA_scene_types.h"
#include "DNA_vec_types.h"
#include "RE_engine.h"
#include "RE_pipeline.h"
@ -825,30 +827,9 @@ class RenderLayerOperation : public NodeOperation {
{
const int view_layer = bnode().custom1;
GPUTexture *pass_texture = context().get_input_texture(view_layer, SCE_PASS_COMBINED);
const int2 size = int2(GPU_texture_width(pass_texture), GPU_texture_height(pass_texture));
/* Compute image output. */
Result &image_result = get_result("Image");
image_result.allocate_texture(Domain(size));
GPU_texture_copy(image_result.texture(), pass_texture);
/* Compute alpha output. */
Result &alpha_result = get_result("Alpha");
alpha_result.allocate_texture(Domain(size));
GPUShader *shader = shader_manager().get("compositor_extract_alpha_from_color");
GPU_shader_bind(shader);
const int input_unit = GPU_shader_get_texture_binding(shader, "input_tx");
GPU_texture_bind(pass_texture, input_unit);
alpha_result.bind_as_image(shader, "output_img");
compute_dispatch_threads_at_least(shader, size);
GPU_shader_unbind();
GPU_texture_unbind(pass_texture);
alpha_result.unbind_as_image();
execute_image(pass_texture);
execute_alpha(pass_texture);
/* Other output passes are not supported for now, so allocate them as invalid. */
for (const bNodeSocket *output : this->node()->output_sockets()) {
@ -861,6 +842,66 @@ class RenderLayerOperation : public NodeOperation {
}
}
}
/* Reads the compositing region of the given pass texture into the "Image" output result, doing
 * nothing if that output is unused. */
void execute_image(GPUTexture *pass_texture)
{
  Result &result = get_result("Image");
  if (!result.should_compute()) {
    return;
  }

  GPUShader *shader = shader_manager().get("compositor_read_pass");
  GPU_shader_bind(shader);

  /* The compositing space might cover only a subset of the pass texture, so offset all reads by
   * the lower bound of the compositing region. */
  const rcti region = context().get_compositing_region();
  const int2 region_lower_bound = int2(region.xmin, region.ymin);
  GPU_shader_uniform_2iv(shader, "compositing_region_lower_bound", region_lower_bound);

  const int pass_unit = GPU_shader_get_texture_binding(shader, "input_tx");
  GPU_texture_bind(pass_texture, pass_unit);

  /* Allocate the output at the size of the compositing region and fill it by dispatching over
   * that same size. */
  const int2 region_size = context().get_compositing_region_size();
  result.allocate_texture(Domain(region_size));
  result.bind_as_image(shader, "output_img");

  compute_dispatch_threads_at_least(shader, region_size);

  GPU_shader_unbind();
  GPU_texture_unbind(pass_texture);
  result.unbind_as_image();
}
/* Reads the alpha of the compositing region of the given pass texture into the "Alpha" output
 * result, doing nothing if that output is unused. */
void execute_alpha(GPUTexture *pass_texture)
{
  Result &result = get_result("Alpha");
  if (!result.should_compute()) {
    return;
  }

  GPUShader *shader = shader_manager().get("compositor_read_pass_alpha");
  GPU_shader_bind(shader);

  /* The compositing space might cover only a subset of the pass texture, so offset all reads by
   * the lower bound of the compositing region. */
  const rcti region = context().get_compositing_region();
  const int2 region_lower_bound = int2(region.xmin, region.ymin);
  GPU_shader_uniform_2iv(shader, "compositing_region_lower_bound", region_lower_bound);

  const int pass_unit = GPU_shader_get_texture_binding(shader, "input_tx");
  GPU_texture_bind(pass_texture, pass_unit);

  /* Allocate the output at the size of the compositing region and fill it by dispatching over
   * that same size. */
  const int2 region_size = context().get_compositing_region_size();
  result.allocate_texture(Domain(region_size));
  result.bind_as_image(shader, "output_img");

  compute_dispatch_threads_at_least(shader, region_size);

  GPU_shader_unbind();
  GPU_texture_unbind(pass_texture);
  result.unbind_as_image();
}
};
static NodeOperation *get_compositor_operation(Context &context, DNode node)

View File

@ -149,7 +149,7 @@ class ScaleOperation : public NodeOperation {
/* Per-axis scale factor that stretches the input to exactly the size of the compositing region,
 * each axis is scaled independently. */
float2 get_scale_render_size_stretch()
{
  const float2 input_size = float2(get_input("Image").domain().size);
  const float2 render_size = float2(context().get_compositing_region_size());
  return render_size / input_size;
}
@ -160,7 +160,7 @@ class ScaleOperation : public NodeOperation {
/* Uniform scale factor that fits the input inside the compositing region, that is, the smaller
 * of the two per-axis scale factors. */
float2 get_scale_render_size_fit()
{
  const float2 input_size = float2(get_input("Image").domain().size);
  const float2 render_size = float2(context().get_compositing_region_size());
  const float2 scale = render_size / input_size;
  return float2(math::min(scale.x, scale.y));
}
@ -172,7 +172,7 @@ class ScaleOperation : public NodeOperation {
/* Uniform scale factor that makes the input fully cover the compositing region, that is, the
 * larger of the two per-axis scale factors. */
float2 get_scale_render_size_crop()
{
  const float2 input_size = float2(get_input("Image").domain().size);
  const float2 render_size = float2(context().get_compositing_region_size());
  const float2 scale = render_size / input_size;
  return float2(math::max(scale.x, scale.y));
}

View File

@ -60,10 +60,16 @@ class ViewerOperation : public NodeOperation {
GPUShader *shader = get_split_viewer_shader();
GPU_shader_bind(shader);
const int2 size = compute_domain().size;
/* The compositing space might be limited to a subset of the output texture, so only write into
* that compositing region. */
const rcti compositing_region = context().get_compositing_region();
const int2 lower_bound = int2(compositing_region.xmin, compositing_region.ymin);
GPU_shader_uniform_2iv(shader, "compositing_region_lower_bound", lower_bound);
GPU_shader_uniform_1f(shader, "split_ratio", get_split_ratio());
GPU_shader_uniform_2iv(shader, "view_size", size);
const int2 compositing_region_size = context().get_compositing_region_size();
GPU_shader_uniform_2iv(shader, "view_size", compositing_region_size);
const Result &first_image = get_input("Image");
first_image.bind_as_texture(shader, "first_image_tx");
@ -74,7 +80,7 @@ class ViewerOperation : public NodeOperation {
const int image_unit = GPU_shader_get_texture_binding(shader, "output_img");
GPU_texture_image_bind(output_texture, image_unit);
compute_dispatch_threads_at_least(shader, size);
compute_dispatch_threads_at_least(shader, compositing_region_size);
first_image.unbind_as_texture();
second_image.unbind_as_texture();
@ -82,10 +88,11 @@ class ViewerOperation : public NodeOperation {
GPU_shader_unbind();
}
/* The operation domain has the same size as the compositing region without any transformations
 * applied. */
Domain compute_domain() override
{
  return Domain(context().get_compositing_region_size());
}
GPUShader *get_split_viewer_shader()

View File

@ -110,9 +110,15 @@ class ViewerOperation : public NodeOperation {
/* Executes when the alpha channel of the image is ignored. */
void execute_ignore_alpha()
{
GPUShader *shader = shader_manager().get("compositor_convert_color_to_opaque");
GPUShader *shader = shader_manager().get("compositor_write_output_opaque");
GPU_shader_bind(shader);
/* The compositing space might be limited to a smaller region of the output texture, so only
* write into that compositing region. */
const rcti compositing_region = context().get_compositing_region();
const int2 lower_bound = int2(compositing_region.xmin, compositing_region.ymin);
GPU_shader_uniform_2iv(shader, "compositing_region_lower_bound", lower_bound);
const Result &image = get_input("Image");
image.bind_as_texture(shader, "input_tx");
@ -120,7 +126,8 @@ class ViewerOperation : public NodeOperation {
const int image_unit = GPU_shader_get_texture_binding(shader, "output_img");
GPU_texture_image_bind(output_texture, image_unit);
compute_dispatch_threads_at_least(shader, compute_domain().size);
const int2 compositing_region_size = context().get_compositing_region_size();
compute_dispatch_threads_at_least(shader, compositing_region_size);
image.unbind_as_texture();
GPU_texture_image_unbind(output_texture);
@ -131,22 +138,44 @@ class ViewerOperation : public NodeOperation {
* to the output texture. */
void execute_copy()
{
  GPUShader *shader = shader_manager().get("compositor_write_output");
  GPU_shader_bind(shader);

  /* The compositing space might be limited to a smaller region of the output texture, so only
   * write into that compositing region. */
  const rcti compositing_region = context().get_compositing_region();
  const int2 lower_bound = int2(compositing_region.xmin, compositing_region.ymin);
  GPU_shader_uniform_2iv(shader, "compositing_region_lower_bound", lower_bound);

  const Result &image = get_input("Image");
  image.bind_as_texture(shader, "input_tx");

  GPUTexture *output_texture = context().get_output_texture();
  const int image_unit = GPU_shader_get_texture_binding(shader, "output_img");
  GPU_texture_image_bind(output_texture, image_unit);

  /* Dispatch only over the compositing region; the rest of the output is left untouched. The
   * previous implementation's whole-texture GPU_texture_copy (and its preceding memory barrier)
   * is removed, since the shader now performs the region-limited write. */
  const int2 compositing_region_size = context().get_compositing_region_size();
  compute_dispatch_threads_at_least(shader, compositing_region_size);

  image.unbind_as_texture();
  GPU_texture_image_unbind(output_texture);
  GPU_shader_unbind();
}
/* Executes when the alpha channel of the image is set as the value of the input alpha. */
void execute_set_alpha()
{
GPUShader *shader = shader_manager().get("compositor_set_alpha");
GPUShader *shader = shader_manager().get("compositor_write_output_alpha");
GPU_shader_bind(shader);
/* The compositing space might be limited to a smaller region of the output texture, so only
* write into that compositing region. */
const rcti compositing_region = context().get_compositing_region();
const int2 lower_bound = int2(compositing_region.xmin, compositing_region.ymin);
GPU_shader_uniform_2iv(shader, "compositing_region_lower_bound", lower_bound);
const Result &image = get_input("Image");
image.bind_as_texture(shader, "image_tx");
image.bind_as_texture(shader, "input_tx");
const Result &alpha = get_input("Alpha");
alpha.bind_as_texture(shader, "alpha_tx");
@ -155,7 +184,8 @@ class ViewerOperation : public NodeOperation {
const int image_unit = GPU_shader_get_texture_binding(shader, "output_img");
GPU_texture_image_bind(output_texture, image_unit);
compute_dispatch_threads_at_least(shader, compute_domain().size);
const int2 compositing_region_size = context().get_compositing_region_size();
compute_dispatch_threads_at_least(shader, compositing_region_size);
image.unbind_as_texture();
alpha.unbind_as_texture();
@ -171,10 +201,11 @@ class ViewerOperation : public NodeOperation {
return bnode().custom2 & CMP_NODE_OUTPUT_IGNORE_ALPHA;
}
/* The operation domain has the same size as the compositing region without any transformations
 * applied. */
Domain compute_domain() override
{
  return Domain(context().get_compositing_region_size());
}
};